1 //===-- LegalizeDAG.cpp - Implement SelectionDAG::Legalize ----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the SelectionDAG::Legalize method.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "llvm/CodeGen/SelectionDAG.h"
15 #include "llvm/ADT/SmallPtrSet.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/Triple.h"
18 #include "llvm/CodeGen/Analysis.h"
19 #include "llvm/CodeGen/MachineFunction.h"
20 #include "llvm/CodeGen/MachineJumpTableInfo.h"
21 #include "llvm/DebugInfo.h"
22 #include "llvm/IR/CallingConv.h"
23 #include "llvm/IR/Constants.h"
24 #include "llvm/IR/DataLayout.h"
25 #include "llvm/IR/DerivedTypes.h"
26 #include "llvm/IR/Function.h"
27 #include "llvm/IR/LLVMContext.h"
28 #include "llvm/Support/Debug.h"
29 #include "llvm/Support/ErrorHandling.h"
30 #include "llvm/Support/MathExtras.h"
31 #include "llvm/Support/raw_ostream.h"
32 #include "llvm/Target/TargetFrameLowering.h"
33 #include "llvm/Target/TargetLowering.h"
34 #include "llvm/Target/TargetMachine.h"
35 using namespace llvm;
36
37 //===----------------------------------------------------------------------===//
38 /// SelectionDAGLegalize - This takes an arbitrary SelectionDAG as input and
39 /// hacks on it until the target machine can handle it. This involves
40 /// eliminating value sizes the machine cannot handle (promoting small sizes to
41 /// large sizes or splitting up large values into small values) as well as
42 /// eliminating operations the machine cannot handle.
43 ///
44 /// This code also does a small amount of optimization and recognition of idioms
45 /// as part of its processing. For example, if a target does not support a
46 /// 'setcc' instruction efficiently, but does support the 'brcc' instruction, this
47 /// will attempt to merge setcc and branch instructions into brcc's.
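/// For illustration, a pair such as (brcond (setcc X, Y, setlt), BB) can be
/// folded into a single (br_cc setlt, X, Y, BB) when BR_CC is the form the
/// target handles well.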
48 ///
49 namespace {
50 class SelectionDAGLegalize : public SelectionDAG::DAGUpdateListener {
51 const TargetMachine &TM;
52 const TargetLowering &TLI;
53 SelectionDAG &DAG;
54
55 /// LegalizePosition - The iterator for walking through the node list.
56 SelectionDAG::allnodes_iterator LegalizePosition;
57
58 /// LegalizedNodes - The set of nodes which have already been legalized.
59 SmallPtrSet<SDNode *, 16> LegalizedNodes;
60
61 // Libcall insertion helpers.
62
63 public:
64 explicit SelectionDAGLegalize(SelectionDAG &DAG);
65
66 void LegalizeDAG();
67
68 private:
69 /// LegalizeOp - Legalizes the given operation.
70 void LegalizeOp(SDNode *Node);
71
72 SDValue OptimizeFloatStore(StoreSDNode *ST);
73
74 void LegalizeLoadOps(SDNode *Node);
75 void LegalizeStoreOps(SDNode *Node);
76
77   /// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
78 /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
79 /// is necessary to spill the vector being inserted into to memory, perform
80 /// the insert there, and then read the result back.
81 SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val,
82 SDValue Idx, DebugLoc dl);
83 SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
84 SDValue Idx, DebugLoc dl);
85
86 /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
87   /// performs the same shuffle in terms of order of result bytes, but on a type
88 /// whose vector element type is narrower than the original shuffle type.
89 /// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
90 SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
91 SDValue N1, SDValue N2,
92 ArrayRef<int> Mask) const;
93
94 void LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC,
95 DebugLoc dl);
96
97 SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
98 SDValue ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, const SDValue *Ops,
99 unsigned NumOps, bool isSigned, DebugLoc dl);
100
101 std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC,
102 SDNode *Node, bool isSigned);
103 SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
104 RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
105 RTLIB::Libcall Call_F128,
106 RTLIB::Libcall Call_PPCF128);
107 SDValue ExpandIntLibCall(SDNode *Node, bool isSigned,
108 RTLIB::Libcall Call_I8,
109 RTLIB::Libcall Call_I16,
110 RTLIB::Libcall Call_I32,
111 RTLIB::Libcall Call_I64,
112 RTLIB::Libcall Call_I128);
113 void ExpandDivRemLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);
114 void ExpandSinCosLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);
115
116 SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT, DebugLoc dl);
117 SDValue ExpandBUILD_VECTOR(SDNode *Node);
118 SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node);
119 void ExpandDYNAMIC_STACKALLOC(SDNode *Node,
120 SmallVectorImpl<SDValue> &Results);
121 SDValue ExpandFCOPYSIGN(SDNode *Node);
122 SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, EVT DestVT,
123 DebugLoc dl);
124 SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT, bool isSigned,
125 DebugLoc dl);
126 SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT, bool isSigned,
127 DebugLoc dl);
128
129 SDValue ExpandBSWAP(SDValue Op, DebugLoc dl);
130 SDValue ExpandBitCount(unsigned Opc, SDValue Op, DebugLoc dl);
131
132 SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
133 SDValue ExpandInsertToVectorThroughStack(SDValue Op);
134 SDValue ExpandVectorBuildThroughStack(SDNode* Node);
135
136 SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP);
137
138 std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);
139
140 void ExpandNode(SDNode *Node);
141 void PromoteNode(SDNode *Node);
142
143   void ForgetNode(SDNode *N) {
144 LegalizedNodes.erase(N);
145 if (LegalizePosition == SelectionDAG::allnodes_iterator(N))
146 ++LegalizePosition;
147 }
148
149 public:
150 // DAGUpdateListener implementation.
151   virtual void NodeDeleted(SDNode *N, SDNode *E) {
152 ForgetNode(N);
153 }
154   virtual void NodeUpdated(SDNode *N) {}
155
156 // Node replacement helpers
157   void ReplacedNode(SDNode *N) {
158 if (N->use_empty()) {
159 DAG.RemoveDeadNode(N);
160 } else {
161 ForgetNode(N);
162 }
163 }
164   void ReplaceNode(SDNode *Old, SDNode *New) {
165 DAG.ReplaceAllUsesWith(Old, New);
166 ReplacedNode(Old);
167 }
168   void ReplaceNode(SDValue Old, SDValue New) {
169 DAG.ReplaceAllUsesWith(Old, New);
170 ReplacedNode(Old.getNode());
171 }
172   void ReplaceNode(SDNode *Old, const SDValue *New) {
173 DAG.ReplaceAllUsesWith(Old, New);
174 ReplacedNode(Old);
175 }
176 };
177 }
178
179 /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
180 /// performs the same shuffle in terms of order of result bytes, but on a type
181 /// whose vector element type is narrower than the original shuffle type.
182 /// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
183 SDValue
184 SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
185 SDValue N1, SDValue N2,
186 ArrayRef<int> Mask) const {
187 unsigned NumMaskElts = VT.getVectorNumElements();
188 unsigned NumDestElts = NVT.getVectorNumElements();
189 unsigned NumEltsGrowth = NumDestElts / NumMaskElts;
190
191 assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!");
192
193 if (NumEltsGrowth == 1)
194 return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]);
195
196 SmallVector<int, 8> NewMask;
197 for (unsigned i = 0; i != NumMaskElts; ++i) {
198 int Idx = Mask[i];
199 for (unsigned j = 0; j != NumEltsGrowth; ++j) {
200 if (Idx < 0)
201 NewMask.push_back(-1);
202 else
203 NewMask.push_back(Idx * NumEltsGrowth + j);
204 }
205 }
206 assert(NewMask.size() == NumDestElts && "Non-integer NumEltsGrowth?");
207 assert(TLI.isShuffleMaskLegal(NewMask, NVT) && "Shuffle not legal?");
208 return DAG.getVectorShuffle(NVT, dl, N1, N2, &NewMask[0]);
209 }
210
211 SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag)
212 : SelectionDAG::DAGUpdateListener(dag),
213 TM(dag.getTarget()), TLI(dag.getTargetLoweringInfo()),
214 DAG(dag) {
215 }
216
217 void SelectionDAGLegalize::LegalizeDAG() {
218 DAG.AssignTopologicalOrder();
219
220 // Visit all the nodes. We start in topological order, so that we see
221 // nodes with their original operands intact. Legalization can produce
222 // new nodes which may themselves need to be legalized. Iterate until all
223 // nodes have been legalized.
224 for (;;) {
225 bool AnyLegalized = false;
226 for (LegalizePosition = DAG.allnodes_end();
227 LegalizePosition != DAG.allnodes_begin(); ) {
228 --LegalizePosition;
229
230 SDNode *N = LegalizePosition;
231 if (LegalizedNodes.insert(N)) {
232 AnyLegalized = true;
233 LegalizeOp(N);
234 }
235 }
236 if (!AnyLegalized)
237 break;
238
239 }
240
241 // Remove dead nodes now.
242 DAG.RemoveDeadNodes();
243 }
244
245 /// ExpandConstantFP - Expands the ConstantFP node to an integer constant or
246 /// a load from the constant pool.
247 SDValue
248 SelectionDAGLegalize::ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP) {
249 bool Extend = false;
250 DebugLoc dl = CFP->getDebugLoc();
251
252 // If a FP immediate is precise when represented as a float and if the
253 // target can do an extending load from float to double, we put it into
254 // the constant pool as a float, even if it is statically typed as a
255 // double. This shrinks FP constants and canonicalizes them for targets where
256 // an FP extending load is the same cost as a normal load (such as on the x87
257 // fp stack or PPC FP unit).
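  // For illustration, an f64 constant such as 1.5 is exactly representable as
  // an f32, so it may be emitted into the constant pool as a 4-byte float and
  // widened with an FP extending load when that load is legal and profitable.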
258 EVT VT = CFP->getValueType(0);
259 ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue());
260 if (!UseCP) {
261 assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion");
262 return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(),
263 (VT == MVT::f64) ? MVT::i64 : MVT::i32);
264 }
265
266 EVT OrigVT = VT;
267 EVT SVT = VT;
268 while (SVT != MVT::f32) {
269 SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
270 if (ConstantFPSDNode::isValueValidForType(SVT, CFP->getValueAPF()) &&
271 // Only do this if the target has a native EXTLOAD instruction from
272 // smaller type.
273 TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) &&
274 TLI.ShouldShrinkFPConstant(OrigVT)) {
275 Type *SType = SVT.getTypeForEVT(*DAG.getContext());
276 LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
277 VT = SVT;
278 Extend = true;
279 }
280 }
281
282 SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy());
283 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
284 if (Extend) {
285 SDValue Result =
286 DAG.getExtLoad(ISD::EXTLOAD, dl, OrigVT,
287 DAG.getEntryNode(),
288 CPIdx, MachinePointerInfo::getConstantPool(),
289 VT, false, false, Alignment);
290 return Result;
291 }
292 SDValue Result =
293 DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx,
294 MachinePointerInfo::getConstantPool(), false, false, false,
295 Alignment);
296 return Result;
297 }
298
299 /// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores.
300 static void ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
301 const TargetLowering &TLI,
302 SelectionDAGLegalize *DAGLegalize) {
303 assert(ST->getAddressingMode() == ISD::UNINDEXED &&
304 "unaligned indexed stores not implemented!");
305 SDValue Chain = ST->getChain();
306 SDValue Ptr = ST->getBasePtr();
307 SDValue Val = ST->getValue();
308 EVT VT = Val.getValueType();
309 int Alignment = ST->getAlignment();
310 DebugLoc dl = ST->getDebugLoc();
311 if (ST->getMemoryVT().isFloatingPoint() ||
312 ST->getMemoryVT().isVector()) {
313 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
314 if (TLI.isTypeLegal(intVT)) {
315 // Expand to a bitconvert of the value to the integer type of the
316 // same size, then a (misaligned) int store.
317 // FIXME: Does not handle truncating floating point stores!
318 SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
319 Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
320 ST->isVolatile(), ST->isNonTemporal(), Alignment);
321 DAGLegalize->ReplaceNode(SDValue(ST, 0), Result);
322 return;
323 }
324     // Do an (aligned) store to a stack slot, then copy from the stack slot
325 // to the final destination using (unaligned) integer loads and stores.
326 EVT StoredVT = ST->getMemoryVT();
327 MVT RegVT =
328 TLI.getRegisterType(*DAG.getContext(),
329 EVT::getIntegerVT(*DAG.getContext(),
330 StoredVT.getSizeInBits()));
331 unsigned StoredBytes = StoredVT.getSizeInBits() / 8;
332 unsigned RegBytes = RegVT.getSizeInBits() / 8;
333 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
334
335 // Make sure the stack slot is also aligned for the register type.
336 SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT);
337
338 // Perform the original store, only redirected to the stack slot.
339 SDValue Store = DAG.getTruncStore(Chain, dl,
340 Val, StackPtr, MachinePointerInfo(),
341 StoredVT, false, false, 0);
342 SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
343 SmallVector<SDValue, 8> Stores;
344 unsigned Offset = 0;
345
346     // Do all but one of the copies using the full register width.
347 for (unsigned i = 1; i < NumRegs; i++) {
348 // Load one integer register's worth from the stack slot.
349 SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr,
350 MachinePointerInfo(),
351 false, false, false, 0);
352 // Store it to the final location. Remember the store.
353 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
354 ST->getPointerInfo().getWithOffset(Offset),
355 ST->isVolatile(), ST->isNonTemporal(),
356 MinAlign(ST->getAlignment(), Offset)));
357 // Increment the pointers.
358 Offset += RegBytes;
359 StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
360 Increment);
361 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
362 }
363
364 // The last store may be partial. Do a truncating store. On big-endian
365 // machines this requires an extending load from the stack slot to ensure
366 // that the bits are in the right place.
367 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
368 8 * (StoredBytes - Offset));
369
370 // Load from the stack slot.
371 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
372 MachinePointerInfo(),
373 MemVT, false, false, 0);
374
375 Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
376 ST->getPointerInfo()
377 .getWithOffset(Offset),
378 MemVT, ST->isVolatile(),
379 ST->isNonTemporal(),
380 MinAlign(ST->getAlignment(), Offset)));
381 // The order of the stores doesn't matter - say it with a TokenFactor.
382 SDValue Result =
383 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
384 Stores.size());
385 DAGLegalize->ReplaceNode(SDValue(ST, 0), Result);
386 return;
387 }
388 assert(ST->getMemoryVT().isInteger() &&
389 !ST->getMemoryVT().isVector() &&
390 "Unaligned store of unknown type.");
391 // Get the half-size VT
392 EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext());
393 int NumBits = NewStoredVT.getSizeInBits();
394 int IncrementSize = NumBits / 8;
395
396 // Divide the stored value in two parts.
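  // For illustration, on a little-endian target an unaligned i32 store becomes
  // two i16 truncating stores: Lo = Val at Ptr, and Hi = (Val >> 16) at Ptr+2.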
397 SDValue ShiftAmount = DAG.getConstant(NumBits,
398 TLI.getShiftAmountTy(Val.getValueType()));
399 SDValue Lo = Val;
400 SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);
401
402 // Store the two parts
403 SDValue Store1, Store2;
404 Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Lo:Hi, Ptr,
405 ST->getPointerInfo(), NewStoredVT,
406 ST->isVolatile(), ST->isNonTemporal(), Alignment);
407 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
408 DAG.getConstant(IncrementSize, TLI.getPointerTy()));
409 Alignment = MinAlign(Alignment, IncrementSize);
410 Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Hi:Lo, Ptr,
411 ST->getPointerInfo().getWithOffset(IncrementSize),
412 NewStoredVT, ST->isVolatile(), ST->isNonTemporal(),
413 Alignment);
414
415 SDValue Result =
416 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
417 DAGLegalize->ReplaceNode(SDValue(ST, 0), Result);
418 }
419
420 /// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads.
421 static void
422 ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
423 const TargetLowering &TLI,
424 SDValue &ValResult, SDValue &ChainResult) {
425 assert(LD->getAddressingMode() == ISD::UNINDEXED &&
426 "unaligned indexed loads not implemented!");
427 SDValue Chain = LD->getChain();
428 SDValue Ptr = LD->getBasePtr();
429 EVT VT = LD->getValueType(0);
430 EVT LoadedVT = LD->getMemoryVT();
431 DebugLoc dl = LD->getDebugLoc();
432 if (VT.isFloatingPoint() || VT.isVector()) {
433 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
434 if (TLI.isTypeLegal(intVT) && TLI.isTypeLegal(LoadedVT)) {
435 // Expand to a (misaligned) integer load of the same size,
436 // then bitconvert to floating point or vector.
437 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getPointerInfo(),
438 LD->isVolatile(),
439 LD->isNonTemporal(),
440 LD->isInvariant(), LD->getAlignment());
441 SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
442 if (LoadedVT != VT)
443 Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND :
444 ISD::ANY_EXTEND, dl, VT, Result);
445
446 ValResult = Result;
447 ChainResult = Chain;
448 return;
449 }
450
451     // Copy the value to an (aligned) stack slot using (unaligned) integer
452 // loads and stores, then do a (aligned) load from the stack slot.
453 MVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT);
454 unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
455 unsigned RegBytes = RegVT.getSizeInBits() / 8;
456 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
457
458 // Make sure the stack slot is also aligned for the register type.
459 SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);
460
461 SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
462 SmallVector<SDValue, 8> Stores;
463 SDValue StackPtr = StackBase;
464 unsigned Offset = 0;
465
466     // Do all but one of the copies using the full register width.
467 for (unsigned i = 1; i < NumRegs; i++) {
468 // Load one integer register's worth from the original location.
469 SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr,
470 LD->getPointerInfo().getWithOffset(Offset),
471 LD->isVolatile(), LD->isNonTemporal(),
472 LD->isInvariant(),
473 MinAlign(LD->getAlignment(), Offset));
474 // Follow the load with a store to the stack slot. Remember the store.
475 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
476 MachinePointerInfo(), false, false, 0));
477 // Increment the pointers.
478 Offset += RegBytes;
479 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
480 StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
481 Increment);
482 }
483
484 // The last copy may be partial. Do an extending load.
485 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
486 8 * (LoadedBytes - Offset));
487 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
488 LD->getPointerInfo().getWithOffset(Offset),
489 MemVT, LD->isVolatile(),
490 LD->isNonTemporal(),
491 MinAlign(LD->getAlignment(), Offset));
492 // Follow the load with a store to the stack slot. Remember the store.
493 // On big-endian machines this requires a truncating store to ensure
494 // that the bits end up in the right place.
495 Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr,
496 MachinePointerInfo(), MemVT,
497 false, false, 0));
498
499 // The order of the stores doesn't matter - say it with a TokenFactor.
500 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
501 Stores.size());
502
503 // Finally, perform the original load only redirected to the stack slot.
504 Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
505 MachinePointerInfo(), LoadedVT, false, false, 0);
506
507 // Callers expect a MERGE_VALUES node.
508 ValResult = Load;
509 ChainResult = TF;
510 return;
511 }
512 assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
513 "Unaligned load of unsupported type.");
514
515 // Compute the new VT that is half the size of the old one. This is an
516 // integer MVT.
517 unsigned NumBits = LoadedVT.getSizeInBits();
518 EVT NewLoadedVT;
519 NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
520 NumBits >>= 1;
521
522 unsigned Alignment = LD->getAlignment();
523 unsigned IncrementSize = NumBits / 8;
524 ISD::LoadExtType HiExtType = LD->getExtensionType();
525
526 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
527 if (HiExtType == ISD::NON_EXTLOAD)
528 HiExtType = ISD::ZEXTLOAD;
529
530 // Load the value in two parts
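  // For illustration, on a little-endian target an unaligned i32 load becomes
  // Lo = zextload i16 from Ptr and Hi = extload i16 from Ptr+2, recombined
  // below as (Hi << 16) | Lo.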
531 SDValue Lo, Hi;
532 if (TLI.isLittleEndian()) {
533 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
534 NewLoadedVT, LD->isVolatile(),
535 LD->isNonTemporal(), Alignment);
536 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
537 DAG.getConstant(IncrementSize, TLI.getPointerTy()));
538 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
539 LD->getPointerInfo().getWithOffset(IncrementSize),
540 NewLoadedVT, LD->isVolatile(),
541 LD->isNonTemporal(), MinAlign(Alignment,IncrementSize));
542 } else {
543 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
544 NewLoadedVT, LD->isVolatile(),
545 LD->isNonTemporal(), Alignment);
546 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
547 DAG.getConstant(IncrementSize, TLI.getPointerTy()));
548 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
549 LD->getPointerInfo().getWithOffset(IncrementSize),
550 NewLoadedVT, LD->isVolatile(),
551 LD->isNonTemporal(), MinAlign(Alignment,IncrementSize));
552 }
553
554   // Aggregate the two parts.
555 SDValue ShiftAmount = DAG.getConstant(NumBits,
556 TLI.getShiftAmountTy(Hi.getValueType()));
557 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
558 Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);
559
560 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
561 Hi.getValue(1));
562
563 ValResult = Result;
564 ChainResult = TF;
565 }
566
567 /// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
568 /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
569 /// is necessary to spill the vector being inserted into to memory, perform
570 /// the insert there, and then read the result back.
571 SDValue SelectionDAGLegalize::
572 PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
573 DebugLoc dl) {
574 SDValue Tmp1 = Vec;
575 SDValue Tmp2 = Val;
576 SDValue Tmp3 = Idx;
577
578 // If the target doesn't support this, we have to spill the input vector
579 // to a temporary stack slot, update the element, then reload it. This is
580 // badness. We could also load the value into a vector register (either
581 // with a "move to register" or "extload into register" instruction, then
582 // permute it into place, if the idx is a constant and if the idx is
583 // supported by the target.
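  // For illustration, inserting a scalar into element 3 of a v4f32 stores the
  // vector to a 16-byte stack slot, stores the scalar at byte offset 3*4 = 12,
  // and reloads the whole vector.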
584 EVT VT = Tmp1.getValueType();
585 EVT EltVT = VT.getVectorElementType();
586 EVT IdxVT = Tmp3.getValueType();
587 EVT PtrVT = TLI.getPointerTy();
588 SDValue StackPtr = DAG.CreateStackTemporary(VT);
589
590 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
591
592 // Store the vector.
593 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp1, StackPtr,
594 MachinePointerInfo::getFixedStack(SPFI),
595 false, false, 0);
596
597 // Truncate or zero extend offset to target pointer type.
598 unsigned CastOpc = IdxVT.bitsGT(PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
599 Tmp3 = DAG.getNode(CastOpc, dl, PtrVT, Tmp3);
600 // Add the offset to the index.
601 unsigned EltSize = EltVT.getSizeInBits()/8;
602 Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,DAG.getConstant(EltSize, IdxVT));
603 SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr);
604 // Store the scalar value.
605 Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2, MachinePointerInfo(), EltVT,
606 false, false, 0);
607 // Load the updated vector.
608 return DAG.getLoad(VT, dl, Ch, StackPtr,
609 MachinePointerInfo::getFixedStack(SPFI), false, false,
610 false, 0);
611 }
612
613
614 SDValue SelectionDAGLegalize::
615 ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx, DebugLoc dl) {
616 if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) {
617 // SCALAR_TO_VECTOR requires that the type of the value being inserted
618 // match the element type of the vector being created, except for
619 // integers in which case the inserted value can be over width.
620 EVT EltVT = Vec.getValueType().getVectorElementType();
621 if (Val.getValueType() == EltVT ||
622 (EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) {
623 SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
624 Vec.getValueType(), Val);
625
626 unsigned NumElts = Vec.getValueType().getVectorNumElements();
627 // We generate a shuffle of InVec and ScVec, so the shuffle mask
628 // should be 0,1,2,3,4,5... with the appropriate element replaced with
629 // elt 0 of the RHS.
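      // For illustration, inserting into element 2 of a 4-element vector uses
      // the mask <0, 1, 4, 3>, where index 4 selects element 0 of ScVec.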
630 SmallVector<int, 8> ShufOps;
631 for (unsigned i = 0; i != NumElts; ++i)
632 ShufOps.push_back(i != InsertPos->getZExtValue() ? i : NumElts);
633
634 return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec,
635 &ShufOps[0]);
636 }
637 }
638 return PerformInsertVectorEltInMemory(Vec, Val, Idx, dl);
639 }
640
641 SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
642 // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
643 // FIXME: We shouldn't do this for TargetConstantFP's.
644 // FIXME: move this to the DAG Combiner! Note that we can't regress due
645 // to phase ordering between legalized code and the dag combiner. This
646 // probably means that we need to integrate dag combiner and legalizer
647 // together.
648 // We generally can't do this one for long doubles.
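  // For illustration, 'store float 1.0' becomes 'store i32 0x3F800000' when
  // i32 is legal; an f64 constant is stored as one i64 or as two i32 halves,
  // depending on which integer type the target supports.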
649 SDValue Chain = ST->getChain();
650 SDValue Ptr = ST->getBasePtr();
651 unsigned Alignment = ST->getAlignment();
652 bool isVolatile = ST->isVolatile();
653 bool isNonTemporal = ST->isNonTemporal();
654 DebugLoc dl = ST->getDebugLoc();
655 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) {
656 if (CFP->getValueType(0) == MVT::f32 &&
657 TLI.isTypeLegal(MVT::i32)) {
658 SDValue Con = DAG.getConstant(CFP->getValueAPF().
659 bitcastToAPInt().zextOrTrunc(32),
660 MVT::i32);
661 return DAG.getStore(Chain, dl, Con, Ptr, ST->getPointerInfo(),
662 isVolatile, isNonTemporal, Alignment);
663 }
664
665 if (CFP->getValueType(0) == MVT::f64) {
666 // If this target supports 64-bit registers, do a single 64-bit store.
667 if (TLI.isTypeLegal(MVT::i64)) {
668 SDValue Con = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
669 zextOrTrunc(64), MVT::i64);
670 return DAG.getStore(Chain, dl, Con, Ptr, ST->getPointerInfo(),
671 isVolatile, isNonTemporal, Alignment);
672 }
673
674 if (TLI.isTypeLegal(MVT::i32) && !ST->isVolatile()) {
675 // Otherwise, if the target supports 32-bit registers, use 2 32-bit
676 // stores. If the target supports neither 32- nor 64-bits, this
677 // xform is certainly not worth it.
678 const APInt &IntVal =CFP->getValueAPF().bitcastToAPInt();
679 SDValue Lo = DAG.getConstant(IntVal.trunc(32), MVT::i32);
680 SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32);
681 if (TLI.isBigEndian()) std::swap(Lo, Hi);
682
683 Lo = DAG.getStore(Chain, dl, Lo, Ptr, ST->getPointerInfo(), isVolatile,
684 isNonTemporal, Alignment);
685 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
686 DAG.getIntPtrConstant(4));
687 Hi = DAG.getStore(Chain, dl, Hi, Ptr,
688 ST->getPointerInfo().getWithOffset(4),
689 isVolatile, isNonTemporal, MinAlign(Alignment, 4U));
690
691 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
692 }
693 }
694 }
695 return SDValue(0, 0);
696 }
697
698 void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
699 StoreSDNode *ST = cast<StoreSDNode>(Node);
700 SDValue Chain = ST->getChain();
701 SDValue Ptr = ST->getBasePtr();
702 DebugLoc dl = Node->getDebugLoc();
703
704 unsigned Alignment = ST->getAlignment();
705 bool isVolatile = ST->isVolatile();
706 bool isNonTemporal = ST->isNonTemporal();
707
708 if (!ST->isTruncatingStore()) {
709 if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) {
710 ReplaceNode(ST, OptStore);
711 return;
712 }
713
714 {
715 SDValue Value = ST->getValue();
716 MVT VT = Value.getSimpleValueType();
717 switch (TLI.getOperationAction(ISD::STORE, VT)) {
718 default: llvm_unreachable("This action is not supported yet!");
719 case TargetLowering::Legal:
720 // If this is an unaligned store and the target doesn't support it,
721 // expand it.
722 if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
723 Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
724 unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
725 if (ST->getAlignment() < ABIAlignment)
726 ExpandUnalignedStore(cast<StoreSDNode>(Node),
727 DAG, TLI, this);
728 }
729 break;
730 case TargetLowering::Custom: {
731 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
732 if (Res.getNode())
733 ReplaceNode(SDValue(Node, 0), Res);
734 return;
735 }
736 case TargetLowering::Promote: {
737 MVT NVT = TLI.getTypeToPromoteTo(ISD::STORE, VT);
738 assert(NVT.getSizeInBits() == VT.getSizeInBits() &&
739 "Can only promote stores to same size type");
740 Value = DAG.getNode(ISD::BITCAST, dl, NVT, Value);
741 SDValue Result =
742 DAG.getStore(Chain, dl, Value, Ptr,
743 ST->getPointerInfo(), isVolatile,
744 isNonTemporal, Alignment);
745 ReplaceNode(SDValue(Node, 0), Result);
746 break;
747 }
748 }
749 return;
750 }
751 } else {
752 SDValue Value = ST->getValue();
753
754 EVT StVT = ST->getMemoryVT();
755 unsigned StWidth = StVT.getSizeInBits();
756
757 if (StWidth != StVT.getStoreSizeInBits()) {
758 // Promote to a byte-sized store with upper bits zero if not
759 // storing an integral number of bytes. For example, promote
760 // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
761 EVT NVT = EVT::getIntegerVT(*DAG.getContext(),
762 StVT.getStoreSizeInBits());
763 Value = DAG.getZeroExtendInReg(Value, dl, StVT);
764 SDValue Result =
765 DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
766 NVT, isVolatile, isNonTemporal, Alignment);
767 ReplaceNode(SDValue(Node, 0), Result);
768 } else if (StWidth & (StWidth - 1)) {
769 // If not storing a power-of-2 number of bits, expand as two stores.
770 assert(!StVT.isVector() && "Unsupported truncstore!");
771 unsigned RoundWidth = 1 << Log2_32(StWidth);
772 assert(RoundWidth < StWidth);
773 unsigned ExtraWidth = StWidth - RoundWidth;
774 assert(ExtraWidth < RoundWidth);
775 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
776 "Store size not an integral number of bytes!");
777 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
778 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
779 SDValue Lo, Hi;
780 unsigned IncrementSize;
781
782 if (TLI.isLittleEndian()) {
783 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
784 // Store the bottom RoundWidth bits.
785 Lo = DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
786 RoundVT,
787 isVolatile, isNonTemporal, Alignment);
788
789 // Store the remaining ExtraWidth bits.
790 IncrementSize = RoundWidth / 8;
791 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
792 DAG.getIntPtrConstant(IncrementSize));
793 Hi = DAG.getNode(ISD::SRL, dl, Value.getValueType(), Value,
794 DAG.getConstant(RoundWidth,
795 TLI.getShiftAmountTy(Value.getValueType())));
796 Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr,
797 ST->getPointerInfo().getWithOffset(IncrementSize),
798 ExtraVT, isVolatile, isNonTemporal,
799 MinAlign(Alignment, IncrementSize));
800 } else {
801 // Big endian - avoid unaligned stores.
802 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X
803 // Store the top RoundWidth bits.
804 Hi = DAG.getNode(ISD::SRL, dl, Value.getValueType(), Value,
805 DAG.getConstant(ExtraWidth,
806 TLI.getShiftAmountTy(Value.getValueType())));
807 Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr, ST->getPointerInfo(),
808 RoundVT, isVolatile, isNonTemporal, Alignment);
809
810 // Store the remaining ExtraWidth bits.
811 IncrementSize = RoundWidth / 8;
812 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
813 DAG.getIntPtrConstant(IncrementSize));
814 Lo = DAG.getTruncStore(Chain, dl, Value, Ptr,
815 ST->getPointerInfo().getWithOffset(IncrementSize),
816 ExtraVT, isVolatile, isNonTemporal,
817 MinAlign(Alignment, IncrementSize));
818 }
819
820 // The order of the stores doesn't matter.
821 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
822 ReplaceNode(SDValue(Node, 0), Result);
823 } else {
824 switch (TLI.getTruncStoreAction(ST->getValue().getSimpleValueType(),
825 StVT.getSimpleVT())) {
826 default: llvm_unreachable("This action is not supported yet!");
827 case TargetLowering::Legal:
828 // If this is an unaligned store and the target doesn't support it,
829 // expand it.
830 if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
831 Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
832 unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
833 if (ST->getAlignment() < ABIAlignment)
834 ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
835 }
836 break;
837 case TargetLowering::Custom: {
838 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
839 if (Res.getNode())
840 ReplaceNode(SDValue(Node, 0), Res);
841 return;
842 }
843 case TargetLowering::Expand:
844 assert(!StVT.isVector() &&
845 "Vector Stores are handled in LegalizeVectorOps");
846
847 // TRUNCSTORE:i16 i32 -> STORE i16
848 assert(TLI.isTypeLegal(StVT) &&
849 "Do not know how to expand this store!");
850 Value = DAG.getNode(ISD::TRUNCATE, dl, StVT, Value);
851 SDValue Result =
852 DAG.getStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
853 isVolatile, isNonTemporal, Alignment);
854 ReplaceNode(SDValue(Node, 0), Result);
855 break;
856 }
857 }
858 }
859 }
860
861 void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
862 LoadSDNode *LD = cast<LoadSDNode>(Node);
863 SDValue Chain = LD->getChain(); // The chain.
864 SDValue Ptr = LD->getBasePtr(); // The base pointer.
865 SDValue Value; // The value returned by the load op.
866 DebugLoc dl = Node->getDebugLoc();
867
868 ISD::LoadExtType ExtType = LD->getExtensionType();
869 if (ExtType == ISD::NON_EXTLOAD) {
870 MVT VT = Node->getSimpleValueType(0);
871 SDValue RVal = SDValue(Node, 0);
872 SDValue RChain = SDValue(Node, 1);
873
874 switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
875 default: llvm_unreachable("This action is not supported yet!");
876 case TargetLowering::Legal:
877 // If this is an unaligned load and the target doesn't support it,
878 // expand it.
879 if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
880 Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
881 unsigned ABIAlignment =
882 TLI.getDataLayout()->getABITypeAlignment(Ty);
883 if (LD->getAlignment() < ABIAlignment){
884 ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, RVal, RChain);
885 }
886 }
887 break;
888 case TargetLowering::Custom: {
889 SDValue Res = TLI.LowerOperation(RVal, DAG);
890 if (Res.getNode()) {
891 RVal = Res;
892 RChain = Res.getValue(1);
893 }
894 break;
895 }
896 case TargetLowering::Promote: {
897 MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
898 assert(NVT.getSizeInBits() == VT.getSizeInBits() &&
899 "Can only promote loads to same size type");
900
901 SDValue Res = DAG.getLoad(NVT, dl, Chain, Ptr, LD->getPointerInfo(),
902 LD->isVolatile(), LD->isNonTemporal(),
903 LD->isInvariant(), LD->getAlignment());
904 RVal = DAG.getNode(ISD::BITCAST, dl, VT, Res);
905 RChain = Res.getValue(1);
906 break;
907 }
908 }
909 if (RChain.getNode() != Node) {
910 assert(RVal.getNode() != Node && "Load must be completely replaced");
911 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), RVal);
912 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), RChain);
913 ReplacedNode(Node);
914 }
915 return;
916 }
917
918 EVT SrcVT = LD->getMemoryVT();
919 unsigned SrcWidth = SrcVT.getSizeInBits();
920 unsigned Alignment = LD->getAlignment();
921 bool isVolatile = LD->isVolatile();
922 bool isNonTemporal = LD->isNonTemporal();
923
924 if (SrcWidth != SrcVT.getStoreSizeInBits() &&
925 // Some targets pretend to have an i1 loading operation, and actually
926 // load an i8. This trick is correct for ZEXTLOAD because the top 7
927 // bits are guaranteed to be zero; it helps the optimizers understand
928 // that these bits are zero. It is also useful for EXTLOAD, since it
929 // tells the optimizers that those bits are undefined. It would be
930 // nice to have an effective generic way of getting these benefits...
931 // Until such a way is found, don't insist on promoting i1 here.
932 (SrcVT != MVT::i1 ||
933 TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
934 // Promote to a byte-sized load if not loading an integral number of
935 // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
936 unsigned NewWidth = SrcVT.getStoreSizeInBits();
937 EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth);
938 SDValue Ch;
939
940 // The extra bits are guaranteed to be zero, since we stored them that
941 // way. A zext load from NVT thus automatically gives zext from SrcVT.
942
943 ISD::LoadExtType NewExtType =
944 ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;
945
946 SDValue Result =
947 DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
948 Chain, Ptr, LD->getPointerInfo(),
949 NVT, isVolatile, isNonTemporal, Alignment);
950
951 Ch = Result.getValue(1); // The chain.
952
953 if (ExtType == ISD::SEXTLOAD)
954 // Having the top bits zero doesn't help when sign extending.
955 Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
956 Result.getValueType(),
957 Result, DAG.getValueType(SrcVT));
958 else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType())
959 // All the top bits are guaranteed to be zero - inform the optimizers.
960 Result = DAG.getNode(ISD::AssertZext, dl,
961 Result.getValueType(), Result,
962 DAG.getValueType(SrcVT));
963
964 Value = Result;
965 Chain = Ch;
966 } else if (SrcWidth & (SrcWidth - 1)) {
967 // If not loading a power-of-2 number of bits, expand as two loads.
968 assert(!SrcVT.isVector() && "Unsupported extload!");
969 unsigned RoundWidth = 1 << Log2_32(SrcWidth);
970 assert(RoundWidth < SrcWidth);
971 unsigned ExtraWidth = SrcWidth - RoundWidth;
972 assert(ExtraWidth < RoundWidth);
973 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
974 "Load size not an integral number of bytes!");
975 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
976 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
977 SDValue Lo, Hi, Ch;
978 unsigned IncrementSize;
979
980 if (TLI.isLittleEndian()) {
981 // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
982 // Load the bottom RoundWidth bits.
983 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0),
984 Chain, Ptr,
985 LD->getPointerInfo(), RoundVT, isVolatile,
986 isNonTemporal, Alignment);
987
988 // Load the remaining ExtraWidth bits.
989 IncrementSize = RoundWidth / 8;
990 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
991 DAG.getIntPtrConstant(IncrementSize));
992 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr,
993 LD->getPointerInfo().getWithOffset(IncrementSize),
994 ExtraVT, isVolatile, isNonTemporal,
995 MinAlign(Alignment, IncrementSize));
996
997 // Build a factor node to remember that this load is independent of
998 // the other one.
999 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
1000 Hi.getValue(1));
1001
1002 // Move the top bits to the right place.
1003 Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
1004 DAG.getConstant(RoundWidth,
1005 TLI.getShiftAmountTy(Hi.getValueType())));
1006
1007 // Join the hi and lo parts.
1008 Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
1009 } else {
1010 // Big endian - avoid unaligned loads.
1011 // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
1012 // Load the top RoundWidth bits.
1013 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr,
1014 LD->getPointerInfo(), RoundVT, isVolatile,
1015 isNonTemporal, Alignment);
1016
1017 // Load the remaining ExtraWidth bits.
1018 IncrementSize = RoundWidth / 8;
1019 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
1020 DAG.getIntPtrConstant(IncrementSize));
1021 Lo = DAG.getExtLoad(ISD::ZEXTLOAD,
1022 dl, Node->getValueType(0), Chain, Ptr,
1023 LD->getPointerInfo().getWithOffset(IncrementSize),
1024 ExtraVT, isVolatile, isNonTemporal,
1025 MinAlign(Alignment, IncrementSize));
1026
1027 // Build a factor node to remember that this load is independent of
1028 // the other one.
1029 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
1030 Hi.getValue(1));
1031
1032 // Move the top bits to the right place.
1033 Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
1034 DAG.getConstant(ExtraWidth,
1035 TLI.getShiftAmountTy(Hi.getValueType())));
1036
1037 // Join the hi and lo parts.
1038 Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
1039 }
1040
1041 Chain = Ch;
1042 } else {
1043 bool isCustom = false;
1044 switch (TLI.getLoadExtAction(ExtType, SrcVT.getSimpleVT())) {
1045 default: llvm_unreachable("This action is not supported yet!");
1046 case TargetLowering::Custom:
1047 isCustom = true;
1048 // FALLTHROUGH
1049 case TargetLowering::Legal: {
1050 Value = SDValue(Node, 0);
1051 Chain = SDValue(Node, 1);
1052
1053 if (isCustom) {
1054 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
1055 if (Res.getNode()) {
1056 Value = Res;
1057 Chain = Res.getValue(1);
1058 }
1059 } else {
1060 // If this is an unaligned load and the target doesn't support it,
1061 // expand it.
1062 if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
1063 Type *Ty =
1064 LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
1065 unsigned ABIAlignment =
1066 TLI.getDataLayout()->getABITypeAlignment(Ty);
1067 if (LD->getAlignment() < ABIAlignment){
1068 ExpandUnalignedLoad(cast<LoadSDNode>(Node),
1069 DAG, TLI, Value, Chain);
1070 }
1071 }
1072 }
1073 break;
1074 }
1075 case TargetLowering::Expand:
1076 if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) && TLI.isTypeLegal(SrcVT)) {
1077 SDValue Load = DAG.getLoad(SrcVT, dl, Chain, Ptr,
1078 LD->getPointerInfo(),
1079 LD->isVolatile(), LD->isNonTemporal(),
1080 LD->isInvariant(), LD->getAlignment());
1081 unsigned ExtendOp;
1082 switch (ExtType) {
1083 case ISD::EXTLOAD:
1084 ExtendOp = (SrcVT.isFloatingPoint() ?
1085 ISD::FP_EXTEND : ISD::ANY_EXTEND);
1086 break;
1087 case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break;
1088 case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break;
1089 default: llvm_unreachable("Unexpected extend load type!");
1090 }
1091 Value = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
1092 Chain = Load.getValue(1);
1093 break;
1094 }
1095
1096 assert(!SrcVT.isVector() &&
1097 "Vector Loads are handled in LegalizeVectorOps");
1098
1099 // FIXME: This does not work for vectors on most targets. Sign- and
1100 // zero-extend operations are currently folded into extending loads,
1101 // whether they are legal or not, and then we end up here without any
1102 // support for legalizing them.
1103 assert(ExtType != ISD::EXTLOAD &&
1104 "EXTLOAD should always be supported!");
1105 // Turn the unsupported load into an EXTLOAD followed by an explicit
1106 // zero/sign extend inreg.
1107 SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0),
1108 Chain, Ptr, LD->getPointerInfo(), SrcVT,
1109 LD->isVolatile(), LD->isNonTemporal(),
1110 LD->getAlignment());
1111 SDValue ValRes;
1112 if (ExtType == ISD::SEXTLOAD)
1113 ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
1114 Result.getValueType(),
1115 Result, DAG.getValueType(SrcVT));
1116 else
1117 ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT.getScalarType());
1118 Value = ValRes;
1119 Chain = Result.getValue(1);
1120 break;
1121 }
1122 }
1123
1124 // Since loads produce two values, make sure to remember that we legalized
1125 // both of them.
1126 if (Chain.getNode() != Node) {
1127 assert(Value.getNode() != Node && "Load must be completely replaced");
1128 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Value);
1129 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Chain);
1130 ReplacedNode(Node);
1131 }
1132 }
1133
1134 /// LegalizeOp - Return a legal replacement for the given operation, with
1135 /// all legal operands.
1136 void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
1137 if (Node->getOpcode() == ISD::TargetConstant) // Allow illegal target nodes.
1138 return;
1139
1140 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
1141 assert(TLI.getTypeAction(*DAG.getContext(), Node->getValueType(i)) ==
1142 TargetLowering::TypeLegal &&
1143 "Unexpected illegal type!");
1144
1145 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
1146 assert((TLI.getTypeAction(*DAG.getContext(),
1147 Node->getOperand(i).getValueType()) ==
1148 TargetLowering::TypeLegal ||
1149 Node->getOperand(i).getOpcode() == ISD::TargetConstant) &&
1150 "Unexpected illegal type!");
1151
1152 // Figure out the correct action; the way to query this varies by opcode
1153 TargetLowering::LegalizeAction Action = TargetLowering::Legal;
1154 bool SimpleFinishLegalizing = true;
1155 switch (Node->getOpcode()) {
1156 case ISD::INTRINSIC_W_CHAIN:
1157 case ISD::INTRINSIC_WO_CHAIN:
1158 case ISD::INTRINSIC_VOID:
1159 case ISD::STACKSAVE:
1160 Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
1161 break;
1162 case ISD::VAARG:
1163 Action = TLI.getOperationAction(Node->getOpcode(),
1164 Node->getValueType(0));
1165 if (Action != TargetLowering::Promote)
1166 Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
1167 break;
1168 case ISD::SINT_TO_FP:
1169 case ISD::UINT_TO_FP:
1170 case ISD::EXTRACT_VECTOR_ELT:
1171 Action = TLI.getOperationAction(Node->getOpcode(),
1172 Node->getOperand(0).getValueType());
1173 break;
1174 case ISD::FP_ROUND_INREG:
1175 case ISD::SIGN_EXTEND_INREG: {
1176 EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT();
1177 Action = TLI.getOperationAction(Node->getOpcode(), InnerType);
1178 break;
1179 }
1180 case ISD::ATOMIC_STORE: {
1181 Action = TLI.getOperationAction(Node->getOpcode(),
1182 Node->getOperand(2).getValueType());
1183 break;
1184 }
1185 case ISD::SELECT_CC:
1186 case ISD::SETCC:
1187 case ISD::BR_CC: {
1188 unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 :
1189 Node->getOpcode() == ISD::SETCC ? 2 : 1;
1190 unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ? 2 : 0;
1191 MVT OpVT = Node->getOperand(CompareOperand).getSimpleValueType();
1192 ISD::CondCode CCCode =
1193 cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get();
1194 Action = TLI.getCondCodeAction(CCCode, OpVT);
1195 if (Action == TargetLowering::Legal) {
1196 if (Node->getOpcode() == ISD::SELECT_CC)
1197 Action = TLI.getOperationAction(Node->getOpcode(),
1198 Node->getValueType(0));
1199 else
1200 Action = TLI.getOperationAction(Node->getOpcode(), OpVT);
1201 }
1202 break;
1203 }
1204 case ISD::LOAD:
1205 case ISD::STORE:
1206 // FIXME: Model these properly. LOAD and STORE are complicated, and
1207 // STORE expects the unlegalized operand in some cases.
1208 SimpleFinishLegalizing = false;
1209 break;
1210 case ISD::CALLSEQ_START:
1211 case ISD::CALLSEQ_END:
1212 // FIXME: This shouldn't be necessary. These nodes have special properties
1213 // dealing with the recursive nature of legalization. Removing this
1214 // special case should be done as part of making LegalizeDAG non-recursive.
1215 SimpleFinishLegalizing = false;
1216 break;
1217 case ISD::EXTRACT_ELEMENT:
1218 case ISD::FLT_ROUNDS_:
1219 case ISD::SADDO:
1220 case ISD::SSUBO:
1221 case ISD::UADDO:
1222 case ISD::USUBO:
1223 case ISD::SMULO:
1224 case ISD::UMULO:
1225 case ISD::FPOWI:
1226 case ISD::MERGE_VALUES:
1227 case ISD::EH_RETURN:
1228 case ISD::FRAME_TO_ARGS_OFFSET:
1229 case ISD::EH_SJLJ_SETJMP:
1230 case ISD::EH_SJLJ_LONGJMP:
1231 // These operations lie about being legal: when they claim to be legal,
1232 // they should actually be expanded.
1233 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
1234 if (Action == TargetLowering::Legal)
1235 Action = TargetLowering::Expand;
1236 break;
1237 case ISD::INIT_TRAMPOLINE:
1238 case ISD::ADJUST_TRAMPOLINE:
1239 case ISD::FRAMEADDR:
1240 case ISD::RETURNADDR:
1241 // These operations lie about being legal: when they claim to be legal,
1242 // they should actually be custom-lowered.
1243 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
1244 if (Action == TargetLowering::Legal)
1245 Action = TargetLowering::Custom;
1246 break;
1247 case ISD::DEBUGTRAP:
1248 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
1249 if (Action == TargetLowering::Expand) {
1250 // replace ISD::DEBUGTRAP with ISD::TRAP
1251 SDValue NewVal;
1252 NewVal = DAG.getNode(ISD::TRAP, Node->getDebugLoc(), Node->getVTList(),
1253 Node->getOperand(0));
1254 ReplaceNode(Node, NewVal.getNode());
1255 LegalizeOp(NewVal.getNode());
1256 return;
1257 }
1258 break;
1259
1260 default:
1261 if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
1262 Action = TargetLowering::Legal;
1263 } else {
1264 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
1265 }
1266 break;
1267 }
1268
1269 if (SimpleFinishLegalizing) {
1270 SDNode *NewNode = Node;
1271 switch (Node->getOpcode()) {
1272 default: break;
1273 case ISD::SHL:
1274 case ISD::SRL:
1275 case ISD::SRA:
1276 case ISD::ROTL:
1277 case ISD::ROTR:
1278 // Legalizing shifts/rotates requires adjusting the shift amount
1279 // to the appropriate width.
1280 if (!Node->getOperand(1).getValueType().isVector()) {
1281 SDValue SAO =
1282 DAG.getShiftAmountOperand(Node->getOperand(0).getValueType(),
1283 Node->getOperand(1));
1284 HandleSDNode Handle(SAO);
1285 LegalizeOp(SAO.getNode());
1286 NewNode = DAG.UpdateNodeOperands(Node, Node->getOperand(0),
1287 Handle.getValue());
1288 }
1289 break;
1290 case ISD::SRL_PARTS:
1291 case ISD::SRA_PARTS:
1292 case ISD::SHL_PARTS:
1293 // Legalizing shifts/rotates requires adjusting the shift amount
1294 // to the appropriate width.
1295 if (!Node->getOperand(2).getValueType().isVector()) {
1296 SDValue SAO =
1297 DAG.getShiftAmountOperand(Node->getOperand(0).getValueType(),
1298 Node->getOperand(2));
1299 HandleSDNode Handle(SAO);
1300 LegalizeOp(SAO.getNode());
1301 NewNode = DAG.UpdateNodeOperands(Node, Node->getOperand(0),
1302 Node->getOperand(1),
1303 Handle.getValue());
1304 }
1305 break;
1306 }
1307
1308 if (NewNode != Node) {
1309 DAG.ReplaceAllUsesWith(Node, NewNode);
1310 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
1311 DAG.TransferDbgValues(SDValue(Node, i), SDValue(NewNode, i));
1312 ReplacedNode(Node);
1313 Node = NewNode;
1314 }
1315 switch (Action) {
1316 case TargetLowering::Legal:
1317 return;
1318 case TargetLowering::Custom: {
1319 // FIXME: The handling for custom lowering with multiple results is
1320 // a complete mess.
1321 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
1322 if (Res.getNode()) {
1323 SmallVector<SDValue, 8> ResultVals;
1324 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) {
1325 if (e == 1)
1326 ResultVals.push_back(Res);
1327 else
1328 ResultVals.push_back(Res.getValue(i));
1329 }
1330 if (Res.getNode() != Node || Res.getResNo() != 0) {
1331 DAG.ReplaceAllUsesWith(Node, ResultVals.data());
1332 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
1333 DAG.TransferDbgValues(SDValue(Node, i), ResultVals[i]);
1334 ReplacedNode(Node);
1335 }
1336 return;
1337 }
1338 }
1339 // FALL THROUGH
1340 case TargetLowering::Expand:
1341 ExpandNode(Node);
1342 return;
1343 case TargetLowering::Promote:
1344 PromoteNode(Node);
1345 return;
1346 }
1347 }
1348
1349 switch (Node->getOpcode()) {
1350 default:
1351 #ifndef NDEBUG
1352 dbgs() << "NODE: ";
1353 Node->dump( &DAG);
1354 dbgs() << "\n";
1355 #endif
1356 llvm_unreachable("Do not know how to legalize this operator!");
1357
1358 case ISD::CALLSEQ_START:
1359 case ISD::CALLSEQ_END:
1360 break;
1361 case ISD::LOAD: {
1362 return LegalizeLoadOps(Node);
1363 }
1364 case ISD::STORE: {
1365 return LegalizeStoreOps(Node);
1366 }
1367 }
1368 }
1369
1370 SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
1371 SDValue Vec = Op.getOperand(0);
1372 SDValue Idx = Op.getOperand(1);
1373 DebugLoc dl = Op.getDebugLoc();
1374 // Store the value to a temporary stack slot, then LOAD the returned part.
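  // For illustration, extracting element 2 of a v4i32 spills the vector and
  // then loads 4 bytes from StackPtr + 2*4 = StackPtr + 8.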
1375 SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType());
1376 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr,
1377 MachinePointerInfo(), false, false, 0);
1378
1379 // Add the offset to the index.
1380 unsigned EltSize =
1381 Vec.getValueType().getVectorElementType().getSizeInBits()/8;
1382 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
1383 DAG.getConstant(EltSize, Idx.getValueType()));
1384
1385 if (Idx.getValueType().bitsGT(TLI.getPointerTy()))
1386 Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx);
1387 else
1388 Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx);
1389
1390 StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr);
1391
1392 if (Op.getValueType().isVector())
1393 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr,MachinePointerInfo(),
1394 false, false, false, 0);
1395 return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr,
1396 MachinePointerInfo(),
1397 Vec.getValueType().getVectorElementType(),
1398 false, false, 0);
1399 }
1400
1401 SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) {
1402 assert(Op.getValueType().isVector() && "Non-vector insert subvector!");
1403
1404 SDValue Vec = Op.getOperand(0);
1405 SDValue Part = Op.getOperand(1);
1406 SDValue Idx = Op.getOperand(2);
1407 DebugLoc dl = Op.getDebugLoc();
1408
1409 // Store the value to a temporary stack slot, then LOAD the returned part.
1410
1411 SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType());
1412 int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
1413 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI);
1414
1415 // First store the whole vector.
1416 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
1417 false, false, 0);
1418
1419 // Then store the inserted part.
1420
1421 // Add the offset to the index.
1422 unsigned EltSize =
1423 Vec.getValueType().getVectorElementType().getSizeInBits()/8;
1424
1425 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
1426 DAG.getConstant(EltSize, Idx.getValueType()));
1427
1428 if (Idx.getValueType().bitsGT(TLI.getPointerTy()))
1429 Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx);
1430 else
1431 Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx);
1432
1433 SDValue SubStackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx,
1434 StackPtr);
1435
1436 // Store the subvector.
1437 Ch = DAG.getStore(DAG.getEntryNode(), dl, Part, SubStackPtr,
1438 MachinePointerInfo(), false, false, 0);
1439
1440 // Finally, load the updated vector.
1441 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, PtrInfo,
1442 false, false, false, 0);
1443 }
1444
1445 SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
1446 // We can't handle this case efficiently. Allocate a sufficiently
1447 // aligned object on the stack, store each element into it, then load
1448 // the result as a vector.
1449 // Create the stack frame object.
1450 EVT VT = Node->getValueType(0);
1451 EVT EltVT = VT.getVectorElementType();
1452 DebugLoc dl = Node->getDebugLoc();
1453 SDValue FIPtr = DAG.CreateStackTemporary(VT);
1454 int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex();
1455 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI);
1456
1457 // Emit a store of each element to the stack slot.
1458 SmallVector<SDValue, 8> Stores;
1459 unsigned TypeByteSize = EltVT.getSizeInBits() / 8;
1460 // Store (in the right endianness) the elements to memory.
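  // For example, a v4i32 BUILD_VECTOR stores its four 4-byte operands at byte
  // offsets 0, 4, 8 and 12 of the slot before reloading the whole vector.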
1461 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
1462 // Ignore undef elements.
1463 if (Node->getOperand(i).getOpcode() == ISD::UNDEF) continue;
1464
1465 unsigned Offset = TypeByteSize*i;
1466
1467 SDValue Idx = DAG.getConstant(Offset, FIPtr.getValueType());
1468 Idx = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, Idx);
1469
1470 // If the destination vector element type is narrower than the source
1471 // element type, only store the bits necessary.
1472 if (EltVT.bitsLT(Node->getOperand(i).getValueType().getScalarType())) {
1473 Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl,
1474 Node->getOperand(i), Idx,
1475 PtrInfo.getWithOffset(Offset),
1476 EltVT, false, false, 0));
1477 } else
1478 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl,
1479 Node->getOperand(i), Idx,
1480 PtrInfo.getWithOffset(Offset),
1481 false, false, 0));
1482 }
1483
1484 SDValue StoreChain;
1485 if (!Stores.empty()) // Not all undef elements?
1486 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1487 &Stores[0], Stores.size());
1488 else
1489 StoreChain = DAG.getEntryNode();
1490
1491 // Result is a load from the stack slot.
1492 return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo,
1493 false, false, false, 0);
1494 }
1495
1496 SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
1497 DebugLoc dl = Node->getDebugLoc();
1498 SDValue Tmp1 = Node->getOperand(0);
1499 SDValue Tmp2 = Node->getOperand(1);
1500
1501 // Get the sign bit of the RHS. First obtain a value that has the same
1502 // sign as the sign bit, i.e. negative if and only if the sign bit is 1.
1503 SDValue SignBit;
1504 EVT FloatVT = Tmp2.getValueType();
1505 EVT IVT = EVT::getIntegerVT(*DAG.getContext(), FloatVT.getSizeInBits());
1506 if (TLI.isTypeLegal(IVT)) {
1507 // Convert to an integer with the same sign bit.
1508 SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2);
1509 } else {
1510 // Store the float to memory, then load the sign part out as an integer.
1511 MVT LoadTy = TLI.getPointerTy();
1512 // First create a temporary that is aligned for both the load and store.
1513 SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy);
1514 // Then store the float to it.
1515 SDValue Ch =
1516 DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr, MachinePointerInfo(),
1517 false, false, 0);
1518 if (TLI.isBigEndian()) {
1519 assert(FloatVT.isByteSized() && "Unsupported floating point type!");
1520 // Load out a legal integer with the same sign bit as the float.
1521 SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, MachinePointerInfo(),
1522 false, false, false, 0);
1523 } else { // Little endian
1524 SDValue LoadPtr = StackPtr;
1525 // The float may be wider than the integer we are going to load. Advance
1526 // the pointer so that the loaded integer will contain the sign bit.
1527 unsigned Strides = (FloatVT.getSizeInBits()-1)/LoadTy.getSizeInBits();
1528 unsigned ByteOffset = (Strides * LoadTy.getSizeInBits()) / 8;
1529 LoadPtr = DAG.getNode(ISD::ADD, dl, LoadPtr.getValueType(),
1530 LoadPtr, DAG.getIntPtrConstant(ByteOffset));
1531 // Load a legal integer containing the sign bit.
1532 SignBit = DAG.getLoad(LoadTy, dl, Ch, LoadPtr, MachinePointerInfo(),
1533 false, false, false, 0);
1534 // Move the sign bit to the top bit of the loaded integer.
1535 unsigned BitShift = LoadTy.getSizeInBits() -
1536 (FloatVT.getSizeInBits() - 8 * ByteOffset);
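      // For example, with an f80 float and a 32-bit LoadTy: Strides = 79/32 = 2,
      // ByteOffset = 8, and BitShift = 32 - (80 - 64) = 16, so the sign bit ends
      // up in bit 31 of the loaded integer after the shift.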
1537 assert(BitShift < LoadTy.getSizeInBits() && "Pointer advanced wrong?");
1538 if (BitShift)
1539 SignBit = DAG.getNode(ISD::SHL, dl, LoadTy, SignBit,
1540 DAG.getConstant(BitShift,
1541 TLI.getShiftAmountTy(SignBit.getValueType())));
1542 }
1543 }
1544 // Now get the sign bit proper, by seeing whether the value is negative.
1545 SignBit = DAG.getSetCC(dl, TLI.getSetCCResultType(SignBit.getValueType()),
1546 SignBit, DAG.getConstant(0, SignBit.getValueType()),
1547 ISD::SETLT);
1548 // Get the absolute value of the result.
1549 SDValue AbsVal = DAG.getNode(ISD::FABS, dl, Tmp1.getValueType(), Tmp1);
1550 // Select between the nabs and abs value based on the sign bit of
1551 // the input.
1552 return DAG.getNode(ISD::SELECT, dl, AbsVal.getValueType(), SignBit,
1553 DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal),
1554 AbsVal);
1555 }
1556
1557 void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node,
1558 SmallVectorImpl<SDValue> &Results) {
1559 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
1560 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
1561 " not tell us which reg is the stack pointer!");
1562 DebugLoc dl = Node->getDebugLoc();
1563 EVT VT = Node->getValueType(0);
1564 SDValue Tmp1 = SDValue(Node, 0);
1565 SDValue Tmp2 = SDValue(Node, 1);
1566 SDValue Tmp3 = Node->getOperand(2);
1567 SDValue Chain = Tmp1.getOperand(0);
1568
1569 // Chain the dynamic stack allocation so that it doesn't modify the stack
1570 // pointer when other instructions are using the stack.
1571 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true));
1572
1573 SDValue Size = Tmp2.getOperand(1);
1574 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
1575 Chain = SP.getValue(1);
1576 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
1577 unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
1578 if (Align > StackAlign)
1579 SP = DAG.getNode(ISD::AND, dl, VT, SP,
1580 DAG.getConstant(-(uint64_t)Align, VT));
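  // For example, Align = 32 masks SP with -32 (~31), rounding the stack
  // pointer down to a 32-byte boundary before the allocation size is
  // subtracted.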
1581 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
1582 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
1583
1584 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
1585 DAG.getIntPtrConstant(0, true), SDValue());
1586
1587 Results.push_back(Tmp1);
1588 Results.push_back(Tmp2);
1589 }
1590
1591 /// LegalizeSetCCCondCode - Legalize a SETCC with given LHS and RHS and
1592 /// condition code CC on the current target. This routine expands SETCC with
1593 /// illegal condition code into AND / OR of multiple SETCC values.
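/// For example, a floating-point SETOGT whose condition code is not legal is
/// expanded into (SETGT LHS, RHS) AND (SETO LHS, RHS), while SETUGT becomes
/// (SETGT LHS, RHS) OR (SETUO LHS, RHS).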
1594 void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT,
1595 SDValue &LHS, SDValue &RHS,
1596 SDValue &CC,
1597 DebugLoc dl) {
1598 MVT OpVT = LHS.getSimpleValueType();
1599 ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get();
1600 switch (TLI.getCondCodeAction(CCCode, OpVT)) {
1601 default: llvm_unreachable("Unknown condition code action!");
1602 case TargetLowering::Legal:
1603 // Nothing to do.
1604 break;
1605 case TargetLowering::Expand: {
1606 ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID;
1607 ISD::CondCode InvCC = ISD::SETCC_INVALID;
1608 unsigned Opc = 0;
1609 switch (CCCode) {
1610 default: llvm_unreachable("Don't know how to expand this condition!");
1611 case ISD::SETO:
1612 assert(TLI.getCondCodeAction(ISD::SETOEQ, OpVT)
1613 == TargetLowering::Legal
1614 && "If SETO is expanded, SETOEQ must be legal!");
1615 CC1 = ISD::SETOEQ; CC2 = ISD::SETOEQ; Opc = ISD::AND; break;
1616 case ISD::SETUO:
1617 assert(TLI.getCondCodeAction(ISD::SETUNE, OpVT)
1618 == TargetLowering::Legal
1619 && "If SETUO is expanded, SETUNE must be legal!");
1620 CC1 = ISD::SETUNE; CC2 = ISD::SETUNE; Opc = ISD::OR; break;
1621 case ISD::SETOEQ:
1622 case ISD::SETOGT:
1623 case ISD::SETOGE:
1624 case ISD::SETOLT:
1625 case ISD::SETOLE:
1626 case ISD::SETONE:
1627 case ISD::SETUEQ:
1628 case ISD::SETUNE:
1629 case ISD::SETUGT:
1630 case ISD::SETUGE:
1631 case ISD::SETULT:
1632 case ISD::SETULE:
1633 // If we are floating point, assign and break, otherwise fall through.
1634 if (!OpVT.isInteger()) {
1635 // We can use the 4th bit to tell if we are the unordered
1636 // or ordered version of the opcode.
1637 CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO;
1638 Opc = ((unsigned)CCCode & 0x8U) ? ISD::OR : ISD::AND;
1639 CC1 = (ISD::CondCode)(((int)CCCode & 0x7) | 0x10);
1640 break;
1641 }
1642 // Fallthrough if we are unsigned integer.
1643 case ISD::SETLE:
1644 case ISD::SETGT:
1645 case ISD::SETGE:
1646 case ISD::SETLT:
1647 case ISD::SETNE:
1648 case ISD::SETEQ:
1649 InvCC = ISD::getSetCCSwappedOperands(CCCode);
1650 if (TLI.getCondCodeAction(InvCC, OpVT) == TargetLowering::Expand) {
1651         // The only expansion we support for these cases is swapping the
1652         // operands and using the swapped condition code; if that is also
1653         // marked Expand, we have to give up.
1653 llvm_unreachable("Don't know how to expand this condition!");
1654 }
1655 LHS = DAG.getSetCC(dl, VT, RHS, LHS, InvCC);
1656 RHS = SDValue();
1657 CC = SDValue();
1658 return;
1659 }
1660
1661 SDValue SetCC1, SetCC2;
1662 if (CCCode != ISD::SETO && CCCode != ISD::SETUO) {
1663       // If we aren't the ordered or unordered operation,
1664 // then the pattern is (LHS CC1 RHS) Opc (LHS CC2 RHS).
1665 SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1);
1666 SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2);
1667 } else {
1668 // Otherwise, the pattern is (LHS CC1 LHS) Opc (RHS CC2 RHS)
1669 SetCC1 = DAG.getSetCC(dl, VT, LHS, LHS, CC1);
1670 SetCC2 = DAG.getSetCC(dl, VT, RHS, RHS, CC2);
1671 }
1672 LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2);
1673 RHS = SDValue();
1674 CC = SDValue();
1675 break;
1676 }
1677 }
1678 }
1679
1680 /// EmitStackConvert - Emit a store/load combination to the stack. This stores
1681 /// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does
1682 /// a load from the stack slot to DestVT, extending it if needed.
1683 /// The resultant code need not be legal.
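/// For example, a BITCAST from i64 to f64 that the target cannot perform in
/// registers is expanded here into an 8-byte store of the i64 followed by an
/// f64 load from the same slot.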
1684 SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
1685 EVT SlotVT,
1686 EVT DestVT,
1687 DebugLoc dl) {
1688 // Create the stack frame object.
1689 unsigned SrcAlign =
1690 TLI.getDataLayout()->getPrefTypeAlignment(SrcOp.getValueType().
1691 getTypeForEVT(*DAG.getContext()));
1692 SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign);
1693
1694 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);
1695 int SPFI = StackPtrFI->getIndex();
1696 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI);
1697
1698 unsigned SrcSize = SrcOp.getValueType().getSizeInBits();
1699 unsigned SlotSize = SlotVT.getSizeInBits();
1700 unsigned DestSize = DestVT.getSizeInBits();
1701 Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
1702 unsigned DestAlign = TLI.getDataLayout()->getPrefTypeAlignment(DestType);
1703
1704   // Emit a store to the stack slot.  Use a truncstore if the input value is
1705   // bigger than the slot size.
1706 SDValue Store;
1707
1708 if (SrcSize > SlotSize)
1709 Store = DAG.getTruncStore(DAG.getEntryNode(), dl, SrcOp, FIPtr,
1710 PtrInfo, SlotVT, false, false, SrcAlign);
1711 else {
1712 assert(SrcSize == SlotSize && "Invalid store");
1713 Store = DAG.getStore(DAG.getEntryNode(), dl, SrcOp, FIPtr,
1714 PtrInfo, false, false, SrcAlign);
1715 }
1716
1717 // Result is a load from the stack slot.
1718 if (SlotSize == DestSize)
1719 return DAG.getLoad(DestVT, dl, Store, FIPtr, PtrInfo,
1720 false, false, false, DestAlign);
1721
1722 assert(SlotSize < DestSize && "Unknown extension!");
1723 return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr,
1724 PtrInfo, SlotVT, false, false, DestAlign);
1725 }
1726
1727 SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) {
1728 DebugLoc dl = Node->getDebugLoc();
1729 // Create a vector sized/aligned stack slot, store the value to element #0,
1730 // then load the whole vector back out.
1731 SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0));
1732
1733 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr);
1734 int SPFI = StackPtrFI->getIndex();
1735
1736 SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), dl, Node->getOperand(0),
1737 StackPtr,
1738 MachinePointerInfo::getFixedStack(SPFI),
1739 Node->getValueType(0).getVectorElementType(),
1740 false, false, 0);
1741 return DAG.getLoad(Node->getValueType(0), dl, Ch, StackPtr,
1742 MachinePointerInfo::getFixedStack(SPFI),
1743 false, false, false, 0);
1744 }
1745
1746
1747 /// ExpandBUILD_VECTOR - Expand a BUILD_VECTOR node on targets that don't
1748 /// support the operation, but do support the resultant vector type.
1749 SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
1750 unsigned NumElems = Node->getNumOperands();
1751 SDValue Value1, Value2;
1752 DebugLoc dl = Node->getDebugLoc();
1753 EVT VT = Node->getValueType(0);
1754 EVT OpVT = Node->getOperand(0).getValueType();
1755 EVT EltVT = VT.getVectorElementType();
1756
1757 // If the only non-undef value is the low element, turn this into a
1758 // SCALAR_TO_VECTOR node. If this is { X, X, X, X }, determine X.
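  // For example, (BUILD_VECTOR X, X, Y, X) can be emitted as a shuffle of
  // SCALAR_TO_VECTOR(X) and SCALAR_TO_VECTOR(Y) with mask <0, 0, 4, 0>.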
1759 bool isOnlyLowElement = true;
1760 bool MoreThanTwoValues = false;
1761 bool isConstant = true;
1762 for (unsigned i = 0; i < NumElems; ++i) {
1763 SDValue V = Node->getOperand(i);
1764 if (V.getOpcode() == ISD::UNDEF)
1765 continue;
1766 if (i > 0)
1767 isOnlyLowElement = false;
1768 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
1769 isConstant = false;
1770
1771 if (!Value1.getNode()) {
1772 Value1 = V;
1773 } else if (!Value2.getNode()) {
1774 if (V != Value1)
1775 Value2 = V;
1776 } else if (V != Value1 && V != Value2) {
1777 MoreThanTwoValues = true;
1778 }
1779 }
1780
1781 if (!Value1.getNode())
1782 return DAG.getUNDEF(VT);
1783
1784 if (isOnlyLowElement)
1785 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0));
1786
1787 // If all elements are constants, create a load from the constant pool.
1788 if (isConstant) {
1789 SmallVector<Constant*, 16> CV;
1790 for (unsigned i = 0, e = NumElems; i != e; ++i) {
1791 if (ConstantFPSDNode *V =
1792 dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) {
1793 CV.push_back(const_cast<ConstantFP *>(V->getConstantFPValue()));
1794 } else if (ConstantSDNode *V =
1795 dyn_cast<ConstantSDNode>(Node->getOperand(i))) {
1796 if (OpVT==EltVT)
1797 CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue()));
1798 else {
1799 // If OpVT and EltVT don't match, EltVT is not legal and the
1800 // element values have been promoted/truncated earlier. Undo this;
1801 // we don't want a v16i8 to become a v16i32 for example.
1802 const ConstantInt *CI = V->getConstantIntValue();
1803 CV.push_back(ConstantInt::get(EltVT.getTypeForEVT(*DAG.getContext()),
1804 CI->getZExtValue()));
1805 }
1806 } else {
1807 assert(Node->getOperand(i).getOpcode() == ISD::UNDEF);
1808 Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext());
1809 CV.push_back(UndefValue::get(OpNTy));
1810 }
1811 }
1812 Constant *CP = ConstantVector::get(CV);
1813 SDValue CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy());
1814 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
1815 return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
1816 MachinePointerInfo::getConstantPool(),
1817 false, false, false, Alignment);
1818 }
1819
1820 if (!MoreThanTwoValues) {
1821 SmallVector<int, 8> ShuffleVec(NumElems, -1);
1822 for (unsigned i = 0; i < NumElems; ++i) {
1823 SDValue V = Node->getOperand(i);
1824 if (V.getOpcode() == ISD::UNDEF)
1825 continue;
1826 ShuffleVec[i] = V == Value1 ? 0 : NumElems;
1827 }
1828 if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) {
1829 // Get the splatted value into the low element of a vector register.
1830 SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1);
1831 SDValue Vec2;
1832 if (Value2.getNode())
1833 Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2);
1834 else
1835 Vec2 = DAG.getUNDEF(VT);
1836
1837 // Return shuffle(LowValVec, undef, <0,0,0,0>)
1838 return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data());
1839 }
1840 }
1841
1842 // Otherwise, we can't handle this case efficiently.
1843 return ExpandVectorBuildThroughStack(Node);
1844 }
1845
1846 // ExpandLibCall - Expand a node into a call to a libcall. If the result value
1847 // does not fit into a register, return the lo part and set the hi part to the
1848 // by-reg argument. If it does fit into a single register, return the result
1849 // and leave the Hi part unset.
1850 SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
1851 bool isSigned) {
1852 TargetLowering::ArgListTy Args;
1853 TargetLowering::ArgListEntry Entry;
1854 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
1855 EVT ArgVT = Node->getOperand(i).getValueType();
1856 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
1857 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy;
1858 Entry.isSExt = isSigned;
1859 Entry.isZExt = !isSigned;
1860 Args.push_back(Entry);
1861 }
1862 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
1863 TLI.getPointerTy());
1864
1865 Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
1866
1867 // By default, the input chain to this libcall is the entry node of the
1868 // function. If the libcall is going to be emitted as a tail call then
1869 // TLI.isUsedByReturnOnly will change it to the right chain if the return
1870 // node which is being folded has a non-entry input chain.
1871 SDValue InChain = DAG.getEntryNode();
1872
1873 // isTailCall may be true since the callee does not reference caller stack
1874 // frame. Check if it's in the right position.
1875 SDValue TCChain = InChain;
1876 bool isTailCall = TLI.isInTailCallPosition(DAG, Node, TCChain);
1877 if (isTailCall)
1878 InChain = TCChain;
1879
1880 TargetLowering::
1881 CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, false,
1882 0, TLI.getLibcallCallingConv(LC), isTailCall,
1883 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
1884 Callee, Args, DAG, Node->getDebugLoc());
1885 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
1886
1887
1888 if (!CallInfo.second.getNode())
1889 // It's a tailcall, return the chain (which is the DAG root).
1890 return DAG.getRoot();
1891
1892 return CallInfo.first;
1893 }
1894
1895 /// ExpandLibCall - Generate a libcall taking the given operands as arguments
1896 /// and returning a result of type RetVT.
1897 SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT,
1898 const SDValue *Ops, unsigned NumOps,
1899 bool isSigned, DebugLoc dl) {
1900 TargetLowering::ArgListTy Args;
1901 Args.reserve(NumOps);
1902
1903 TargetLowering::ArgListEntry Entry;
1904 for (unsigned i = 0; i != NumOps; ++i) {
1905 Entry.Node = Ops[i];
1906 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
1907 Entry.isSExt = isSigned;
1908 Entry.isZExt = !isSigned;
1909 Args.push_back(Entry);
1910 }
1911 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
1912 TLI.getPointerTy());
1913
1914 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
1915 TargetLowering::
1916 CallLoweringInfo CLI(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
1917 false, 0, TLI.getLibcallCallingConv(LC),
1918 /*isTailCall=*/false,
1919 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
1920 Callee, Args, DAG, dl);
1921 std::pair<SDValue,SDValue> CallInfo = TLI.LowerCallTo(CLI);
1922
1923 return CallInfo.first;
1924 }
1925
1926 // ExpandChainLibCall - Expand a node into a call to a libcall. Similar to
1927 // ExpandLibCall except that the first operand is the in-chain.
1928 std::pair<SDValue, SDValue>
1929 SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
1930 SDNode *Node,
1931 bool isSigned) {
1932 SDValue InChain = Node->getOperand(0);
1933
1934 TargetLowering::ArgListTy Args;
1935 TargetLowering::ArgListEntry Entry;
1936 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) {
1937 EVT ArgVT = Node->getOperand(i).getValueType();
1938 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
1939 Entry.Node = Node->getOperand(i);
1940 Entry.Ty = ArgTy;
1941 Entry.isSExt = isSigned;
1942 Entry.isZExt = !isSigned;
1943 Args.push_back(Entry);
1944 }
1945 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
1946 TLI.getPointerTy());
1947
1948 Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
1949 TargetLowering::
1950 CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, false,
1951 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
1952 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
1953 Callee, Args, DAG, Node->getDebugLoc());
1954 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
1955
1956 return CallInfo;
1957 }
1958
1959 SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node,
1960 RTLIB::Libcall Call_F32,
1961 RTLIB::Libcall Call_F64,
1962 RTLIB::Libcall Call_F80,
1963 RTLIB::Libcall Call_F128,
1964 RTLIB::Libcall Call_PPCF128) {
1965 RTLIB::Libcall LC;
1966 switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
1967 default: llvm_unreachable("Unexpected request for libcall!");
1968 case MVT::f32: LC = Call_F32; break;
1969 case MVT::f64: LC = Call_F64; break;
1970 case MVT::f80: LC = Call_F80; break;
1971 case MVT::f128: LC = Call_F128; break;
1972 case MVT::ppcf128: LC = Call_PPCF128; break;
1973 }
1974 return ExpandLibCall(LC, Node, false);
1975 }
1976
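/// ExpandIntLibCall - Expand an integer-typed node into a call to the libcall
/// matching its result width; for example, an i64 signed division typically
/// becomes a call such as __divdi3 on targets without a 64-bit divide.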
1977 SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned,
1978 RTLIB::Libcall Call_I8,
1979 RTLIB::Libcall Call_I16,
1980 RTLIB::Libcall Call_I32,
1981 RTLIB::Libcall Call_I64,
1982 RTLIB::Libcall Call_I128) {
1983 RTLIB::Libcall LC;
1984 switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
1985 default: llvm_unreachable("Unexpected request for libcall!");
1986 case MVT::i8: LC = Call_I8; break;
1987 case MVT::i16: LC = Call_I16; break;
1988 case MVT::i32: LC = Call_I32; break;
1989 case MVT::i64: LC = Call_I64; break;
1990 case MVT::i128: LC = Call_I128; break;
1991 }
1992 return ExpandLibCall(LC, Node, isSigned);
1993 }
1994
1995 /// isDivRemLibcallAvailable - Return true if divmod libcall is available.
1996 static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned,
1997 const TargetLowering &TLI) {
1998 RTLIB::Libcall LC;
1999 switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
2000 default: llvm_unreachable("Unexpected request for libcall!");
2001 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break;
2002 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
2003 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
2004 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
2005 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break;
2006 }
2007
2008 return TLI.getLibcallName(LC) != 0;
2009 }
2010
2011 /// useDivRem - Only issue divrem libcall if both quotient and remainder are
2012 /// needed.
2013 static bool useDivRem(SDNode *Node, bool isSigned, bool isDIV) {
2014 // The other use might have been replaced with a divrem already.
2015 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
2016 unsigned OtherOpcode = 0;
2017 if (isSigned)
2018 OtherOpcode = isDIV ? ISD::SREM : ISD::SDIV;
2019 else
2020 OtherOpcode = isDIV ? ISD::UREM : ISD::UDIV;
2021
2022 SDValue Op0 = Node->getOperand(0);
2023 SDValue Op1 = Node->getOperand(1);
2024 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(),
2025 UE = Op0.getNode()->use_end(); UI != UE; ++UI) {
2026 SDNode *User = *UI;
2027 if (User == Node)
2028 continue;
2029 if ((User->getOpcode() == OtherOpcode || User->getOpcode() == DivRemOpc) &&
2030 User->getOperand(0) == Op0 &&
2031 User->getOperand(1) == Op1)
2032 return true;
2033 }
2034 return false;
2035 }
2036
2037 /// ExpandDivRemLibCall - Issue libcalls to __{u}divmod to compute div / rem
2038 /// pairs.
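/// The libcall convention assumed here is that the quotient is the return
/// value and the remainder is written through an extra pointer argument, which
/// is why a stack temporary is appended to the argument list and reloaded
/// after the call.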
2039 void
2040 SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node,
2041 SmallVectorImpl<SDValue> &Results) {
2042 unsigned Opcode = Node->getOpcode();
2043 bool isSigned = Opcode == ISD::SDIVREM;
2044
2045 RTLIB::Libcall LC;
2046 switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
2047 default: llvm_unreachable("Unexpected request for libcall!");
2048 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break;
2049 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
2050 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
2051 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
2052 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break;
2053 }
2054
2055 // The input chain to this libcall is the entry node of the function.
2056 // Legalizing the call will automatically add the previous call to the
2057 // dependence.
2058 SDValue InChain = DAG.getEntryNode();
2059
2060 EVT RetVT = Node->getValueType(0);
2061 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
2062
2063 TargetLowering::ArgListTy Args;
2064 TargetLowering::ArgListEntry Entry;
2065 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
2066 EVT ArgVT = Node->getOperand(i).getValueType();
2067 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2068 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy;
2069 Entry.isSExt = isSigned;
2070 Entry.isZExt = !isSigned;
2071 Args.push_back(Entry);
2072 }
2073
2074   // Also pass the address of the stack slot where the remainder is returned.
2075 SDValue FIPtr = DAG.CreateStackTemporary(RetVT);
2076 Entry.Node = FIPtr;
2077 Entry.Ty = RetTy->getPointerTo();
2078 Entry.isSExt = isSigned;
2079 Entry.isZExt = !isSigned;
2080 Args.push_back(Entry);
2081
2082 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
2083 TLI.getPointerTy());
2084
2085 DebugLoc dl = Node->getDebugLoc();
2086 TargetLowering::
2087 CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, false,
2088 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
2089 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
2090 Callee, Args, DAG, dl);
2091 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
2092
2093 // Remainder is loaded back from the stack frame.
2094 SDValue Rem = DAG.getLoad(RetVT, dl, CallInfo.second, FIPtr,
2095 MachinePointerInfo(), false, false, false, 0);
2096 Results.push_back(CallInfo.first);
2097 Results.push_back(Rem);
2098 }
2099
2100 /// isSinCosLibcallAvailable - Return true if sincos libcall is available.
2101 static bool isSinCosLibcallAvailable(SDNode *Node, const TargetLowering &TLI) {
2102 RTLIB::Libcall LC;
2103 switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
2104 default: llvm_unreachable("Unexpected request for libcall!");
2105 case MVT::f32: LC = RTLIB::SINCOS_F32; break;
2106 case MVT::f64: LC = RTLIB::SINCOS_F64; break;
2107 case MVT::f80: LC = RTLIB::SINCOS_F80; break;
2108 case MVT::f128: LC = RTLIB::SINCOS_F128; break;
2109 case MVT::ppcf128: LC = RTLIB::SINCOS_PPCF128; break;
2110 }
2111 return TLI.getLibcallName(LC) != 0;
2112 }
2113
2114 /// canCombineSinCosLibcall - Return true if sincos libcall is available and
2115 /// can be used to combine sin and cos.
2116 static bool canCombineSinCosLibcall(SDNode *Node, const TargetLowering &TLI,
2117 const TargetMachine &TM) {
2118 if (!isSinCosLibcallAvailable(Node, TLI))
2119 return false;
2120 // GNU sin/cos functions set errno while sincos does not. Therefore
2121 // combining sin and cos is only safe if unsafe-fpmath is enabled.
2122 bool isGNU = Triple(TM.getTargetTriple()).getEnvironment() == Triple::GNU;
2123 if (isGNU && !TM.Options.UnsafeFPMath)
2124 return false;
2125 return true;
2126 }
2127
2128 /// useSinCos - Only issue sincos libcall if both sin and cos are
2129 /// needed.
2130 static bool useSinCos(SDNode *Node) {
2131 unsigned OtherOpcode = Node->getOpcode() == ISD::FSIN
2132 ? ISD::FCOS : ISD::FSIN;
2133
2134 SDValue Op0 = Node->getOperand(0);
2135 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(),
2136 UE = Op0.getNode()->use_end(); UI != UE; ++UI) {
2137 SDNode *User = *UI;
2138 if (User == Node)
2139 continue;
2140 // The other user might have been turned into sincos already.
2141 if (User->getOpcode() == OtherOpcode || User->getOpcode() == ISD::FSINCOS)
2142 return true;
2143 }
2144 return false;
2145 }
2146
2147 /// ExpandSinCosLibCall - Issue libcalls to sincos to compute sin / cos
2148 /// pairs.
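/// The libcall is assumed to have the sincos-style signature, e.g.
///   void sincos(double x, double *sinp, double *cosp);
/// so both results come back through the stack slots passed below.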
2149 void
2150 SelectionDAGLegalize::ExpandSinCosLibCall(SDNode *Node,
2151 SmallVectorImpl<SDValue> &Results) {
2152 RTLIB::Libcall LC;
2153 switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
2154 default: llvm_unreachable("Unexpected request for libcall!");
2155 case MVT::f32: LC = RTLIB::SINCOS_F32; break;
2156 case MVT::f64: LC = RTLIB::SINCOS_F64; break;
2157 case MVT::f80: LC = RTLIB::SINCOS_F80; break;
2158 case MVT::f128: LC = RTLIB::SINCOS_F128; break;
2159 case MVT::ppcf128: LC = RTLIB::SINCOS_PPCF128; break;
2160 }
2161
2162 // The input chain to this libcall is the entry node of the function.
2163 // Legalizing the call will automatically add the previous call to the
2164 // dependence.
2165 SDValue InChain = DAG.getEntryNode();
2166
2167 EVT RetVT = Node->getValueType(0);
2168 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
2169
2170 TargetLowering::ArgListTy Args;
2171 TargetLowering::ArgListEntry Entry;
2172
2173 // Pass the argument.
2174 Entry.Node = Node->getOperand(0);
2175 Entry.Ty = RetTy;
2176 Entry.isSExt = false;
2177 Entry.isZExt = false;
2178 Args.push_back(Entry);
2179
2180   // Pass the address of the stack slot where sin(x) is returned.
2181 SDValue SinPtr = DAG.CreateStackTemporary(RetVT);
2182 Entry.Node = SinPtr;
2183 Entry.Ty = RetTy->getPointerTo();
2184 Entry.isSExt = false;
2185 Entry.isZExt = false;
2186 Args.push_back(Entry);
2187
2188   // Also pass the address of the stack slot where cos(x) is returned.
2189 SDValue CosPtr = DAG.CreateStackTemporary(RetVT);
2190 Entry.Node = CosPtr;
2191 Entry.Ty = RetTy->getPointerTo();
2192 Entry.isSExt = false;
2193 Entry.isZExt = false;
2194 Args.push_back(Entry);
2195
2196 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
2197 TLI.getPointerTy());
2198
2199 DebugLoc dl = Node->getDebugLoc();
2200 TargetLowering::
2201 CallLoweringInfo CLI(InChain, Type::getVoidTy(*DAG.getContext()),
2202 false, false, false, false,
2203 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
2204 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
2205 Callee, Args, DAG, dl);
2206 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
2207
2208 Results.push_back(DAG.getLoad(RetVT, dl, CallInfo.second, SinPtr,
2209 MachinePointerInfo(), false, false, false, 0));
2210 Results.push_back(DAG.getLoad(RetVT, dl, CallInfo.second, CosPtr,
2211 MachinePointerInfo(), false, false, false, 0));
2212 }
2213
2214 /// ExpandLegalINT_TO_FP - This function is responsible for legalizing an
2215 /// INT_TO_FP operation of the specified operand when the target requests that
2216 /// we expand it. At this point, we know that the result and operand types are
2217 /// legal for the target.
2218 SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
2219 SDValue Op0,
2220 EVT DestVT,
2221 DebugLoc dl) {
2222 if (Op0.getValueType() == MVT::i32 && TLI.isTypeLegal(MVT::f64)) {
2223 // simple 32-bit [signed|unsigned] integer to float/double expansion
2224
2225 // Get the stack frame index of a 8 byte buffer.
2226 SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64);
2227
2228 // word offset constant for Hi/Lo address computation
2229 SDValue WordOff = DAG.getConstant(sizeof(int), TLI.getPointerTy());
2230 // set up Hi and Lo (into buffer) address based on endian
2231 SDValue Hi = StackSlot;
2232 SDValue Lo = DAG.getNode(ISD::ADD, dl,
2233 TLI.getPointerTy(), StackSlot, WordOff);
2234 if (TLI.isLittleEndian())
2235 std::swap(Hi, Lo);
2236
2237 // if signed map to unsigned space
2238 SDValue Op0Mapped;
2239 if (isSigned) {
2240 // constant used to invert sign bit (signed to unsigned mapping)
2241 SDValue SignBit = DAG.getConstant(0x80000000u, MVT::i32);
2242 Op0Mapped = DAG.getNode(ISD::XOR, dl, MVT::i32, Op0, SignBit);
2243 } else {
2244 Op0Mapped = Op0;
2245 }
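    // This assembles the double 2^52 + x bit-by-bit: the high word 0x43300000
    // is the sign/exponent pattern of 2^52 and the low word holds x, which
    // fits entirely in the 52-bit mantissa.  Subtracting the bias (2^52, or
    // 2^52 + 2^31 in the signed case) then recovers x exactly as a double.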
2246 // store the lo of the constructed double - based on integer input
2247 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl,
2248 Op0Mapped, Lo, MachinePointerInfo(),
2249 false, false, 0);
2250 // initial hi portion of constructed double
2251 SDValue InitialHi = DAG.getConstant(0x43300000u, MVT::i32);
2252 // store the hi of the constructed double - biased exponent
2253 SDValue Store2 = DAG.getStore(Store1, dl, InitialHi, Hi,
2254 MachinePointerInfo(),
2255 false, false, 0);
2256 // load the constructed double
2257 SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot,
2258 MachinePointerInfo(), false, false, false, 0);
2259 // FP constant to bias correct the final result
2260 SDValue Bias = DAG.getConstantFP(isSigned ?
2261 BitsToDouble(0x4330000080000000ULL) :
2262 BitsToDouble(0x4330000000000000ULL),
2263 MVT::f64);
2264 // subtract the bias
2265 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Load, Bias);
2266 // final result
2267 SDValue Result;
2268 // handle final rounding
2269 if (DestVT == MVT::f64) {
2270 // do nothing
2271 Result = Sub;
2272 } else if (DestVT.bitsLT(MVT::f64)) {
2273 Result = DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
2274 DAG.getIntPtrConstant(0));
2275 } else if (DestVT.bitsGT(MVT::f64)) {
2276 Result = DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
2277 }
2278 return Result;
2279 }
2280 assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet");
2281 // Code below here assumes !isSigned without checking again.
2282
2283 // Implementation of unsigned i64 to f64 following the algorithm in
2284 // __floatundidf in compiler_rt. This implementation has the advantage
2285 // of performing rounding correctly, both in the default rounding mode
2286 // and in all alternate rounding modes.
2287 // TODO: Generalize this for use with other types.
2288 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f64) {
2289 SDValue TwoP52 =
2290 DAG.getConstant(UINT64_C(0x4330000000000000), MVT::i64);
2291 SDValue TwoP84PlusTwoP52 =
2292 DAG.getConstantFP(BitsToDouble(UINT64_C(0x4530000000100000)), MVT::f64);
2293 SDValue TwoP84 =
2294 DAG.getConstant(UINT64_C(0x4530000000000000), MVT::i64);
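    // HiOr reinterprets as the double 2^84 + Hi*2^32 and LoOr as 2^52 + Lo, so
    // (HiFlt - (2^84 + 2^52)) + LoFlt == Hi*2^32 + Lo == Op0, and the only
    // rounding happens in the final FADD.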
2295
2296 SDValue Lo = DAG.getZeroExtendInReg(Op0, dl, MVT::i32);
2297 SDValue Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0,
2298 DAG.getConstant(32, MVT::i64));
2299 SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52);
2300 SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84);
2301 SDValue LoFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, LoOr);
2302 SDValue HiFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, HiOr);
2303 SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt,
2304 TwoP84PlusTwoP52);
2305 return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub);
2306 }
2307
2308 // Implementation of unsigned i64 to f32.
2309 // TODO: Generalize this for use with other types.
2310 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f32) {
2311 // For unsigned conversions, convert them to signed conversions using the
2312 // algorithm from the x86_64 __floatundidf in compiler_rt.
2313 if (!isSigned) {
2314 SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0);
2315
2316 SDValue ShiftConst =
2317 DAG.getConstant(1, TLI.getShiftAmountTy(Op0.getValueType()));
2318 SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst);
2319 SDValue AndConst = DAG.getConstant(1, MVT::i64);
2320 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst);
2321 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, Shr);
2322
2323 SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Or);
2324 SDValue Slow = DAG.getNode(ISD::FADD, dl, MVT::f32, SignCvt, SignCvt);
2325
2326 // TODO: This really should be implemented using a branch rather than a
2327 // select. We happen to get lucky and machinesink does the right
2328 // thing most of the time. This would be a good candidate for a
2329       // pseudo-op, or, even better, for whole-function isel.
2330 SDValue SignBitTest = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64),
2331 Op0, DAG.getConstant(0, MVT::i64), ISD::SETLT);
2332 return DAG.getNode(ISD::SELECT, dl, MVT::f32, SignBitTest, Slow, Fast);
2333 }
2334
2335 // Otherwise, implement the fully general conversion.
2336
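    // The masking below collapses the low 11 bits of large inputs (those
    // >= 2^53) into a single sticky bit, so the f64 intermediate loses no
    // information that the final f64->f32 rounding would still need.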
2337 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0,
2338 DAG.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64));
2339 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And,
2340 DAG.getConstant(UINT64_C(0x800), MVT::i64));
2341 SDValue And2 = DAG.getNode(ISD::AND, dl, MVT::i64, Op0,
2342 DAG.getConstant(UINT64_C(0x7ff), MVT::i64));
2343 SDValue Ne = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64),
2344 And2, DAG.getConstant(UINT64_C(0), MVT::i64), ISD::SETNE);
2345 SDValue Sel = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ne, Or, Op0);
2346 SDValue Ge = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64),
2347 Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64),
2348 ISD::SETUGE);
2349 SDValue Sel2 = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ge, Sel, Op0);
2350 EVT SHVT = TLI.getShiftAmountTy(Sel2.getValueType());
2351
2352 SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2,
2353 DAG.getConstant(32, SHVT));
2354 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sh);
2355 SDValue Fcvt = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Trunc);
2356 SDValue TwoP32 =
2357 DAG.getConstantFP(BitsToDouble(UINT64_C(0x41f0000000000000)), MVT::f64);
2358 SDValue Fmul = DAG.getNode(ISD::FMUL, dl, MVT::f64, TwoP32, Fcvt);
2359 SDValue Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sel2);
2360 SDValue Fcvt2 = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Lo);
2361 SDValue Fadd = DAG.getNode(ISD::FADD, dl, MVT::f64, Fmul, Fcvt2);
2362 return DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Fadd,
2363 DAG.getIntPtrConstant(0));
2364 }
2365
2366 SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0);
2367
2368 SDValue SignSet = DAG.getSetCC(dl, TLI.getSetCCResultType(Op0.getValueType()),
2369 Op0, DAG.getConstant(0, Op0.getValueType()),
2370 ISD::SETLT);
2371 SDValue Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4);
2372 SDValue CstOffset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(),
2373 SignSet, Four, Zero);
2374
2375 // If the sign bit of the integer is set, the large number will be treated
2376 // as a negative number. To counteract this, the dynamic code adds an
2377 // offset depending on the data type.
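  // For example, an i32 input of 0xFFFFFFFF is converted by SINT_TO_FP to
  // -1.0; adding the 2^32 fudge factor selected below gives 4294967295 (up to
  // the destination type's rounding), the operand interpreted as unsigned.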
2378 uint64_t FF;
2379 switch (Op0.getValueType().getSimpleVT().SimpleTy) {
2380 default: llvm_unreachable("Unsupported integer type!");
2381 case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float)
2382 case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float)
2383 case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float)
2384 case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float)
2385 }
2386 if (TLI.isLittleEndian()) FF <<= 32;
2387 Constant *FudgeFactor = ConstantInt::get(
2388 Type::getInt64Ty(*DAG.getContext()), FF);
2389
2390 SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy());
2391 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
2392 CPIdx = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), CPIdx, CstOffset);
2393 Alignment = std::min(Alignment, 4u);
2394 SDValue FudgeInReg;
2395 if (DestVT == MVT::f32)
2396 FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx,
2397 MachinePointerInfo::getConstantPool(),
2398 false, false, false, Alignment);
2399 else {
2400 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT,
2401 DAG.getEntryNode(), CPIdx,
2402 MachinePointerInfo::getConstantPool(),
2403 MVT::f32, false, false, Alignment);
2404 HandleSDNode Handle(Load);
2405 LegalizeOp(Load.getNode());
2406 FudgeInReg = Handle.getValue();
2407 }
2408
2409 return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg);
2410 }
2411
2412 /// PromoteLegalINT_TO_FP - This function is responsible for legalizing a
2413 /// *INT_TO_FP operation of the specified operand when the target requests that
2414 /// we promote it. At this point, we know that the result and operand types are
2415 /// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP
2416 /// operation that takes a larger input.
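/// For example, a UINT_TO_FP of i32 on a target that only provides a signed
/// i64 conversion is handled by zero-extending the operand to i64 and using
/// SINT_TO_FP on the wider type.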
2417 SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp,
2418 EVT DestVT,
2419 bool isSigned,
2420 DebugLoc dl) {
2421 // First step, figure out the appropriate *INT_TO_FP operation to use.
2422 EVT NewInTy = LegalOp.getValueType();
2423
2424 unsigned OpToUse = 0;
2425
2426 // Scan for the appropriate larger type to use.
2427 while (1) {
2428 NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1);
2429 assert(NewInTy.isInteger() && "Ran out of possibilities!");
2430
2431 // If the target supports SINT_TO_FP of this type, use it.
2432 if (TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, NewInTy)) {
2433 OpToUse = ISD::SINT_TO_FP;
2434 break;
2435 }
2436 if (isSigned) continue;
2437
2438 // If the target supports UINT_TO_FP of this type, use it.
2439 if (TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, NewInTy)) {
2440 OpToUse = ISD::UINT_TO_FP;
2441 break;
2442 }
2443
2444 // Otherwise, try a larger type.
2445 }
2446
2447 // Okay, we found the operation and type to use. Zero extend our input to the
2448 // desired type then run the operation on it.
2449 return DAG.getNode(OpToUse, dl, DestVT,
2450 DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
2451 dl, NewInTy, LegalOp));
2452 }
2453
2454 /// PromoteLegalFP_TO_INT - This function is responsible for legalizing a
2455 /// FP_TO_*INT operation of the specified operand when the target requests that
2456 /// we promote it. At this point, we know that the result and operand types are
2457 /// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT
2458 /// operation that returns a larger result.
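/// For example, an FP_TO_UINT producing i32 can be legalized by performing
/// FP_TO_SINT to i64 (when that is legal) and truncating the result back down
/// to i32.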
2459 SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp,
2460 EVT DestVT,
2461 bool isSigned,
2462 DebugLoc dl) {
2463 // First step, figure out the appropriate FP_TO*INT operation to use.
2464 EVT NewOutTy = DestVT;
2465
2466 unsigned OpToUse = 0;
2467
2468 // Scan for the appropriate larger type to use.
2469 while (1) {
2470 NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1);
2471 assert(NewOutTy.isInteger() && "Ran out of possibilities!");
2472
2473 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewOutTy)) {
2474 OpToUse = ISD::FP_TO_SINT;
2475 break;
2476 }
2477
2478 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewOutTy)) {
2479 OpToUse = ISD::FP_TO_UINT;
2480 break;
2481 }
2482
2483 // Otherwise, try a larger type.
2484 }
2485
2486
2487 // Okay, we found the operation and type to use.
2488 SDValue Operation = DAG.getNode(OpToUse, dl, NewOutTy, LegalOp);
2489
2490 // Truncate the result of the extended FP_TO_*INT operation to the desired
2491 // size.
2492 return DAG.getNode(ISD::TRUNCATE, dl, DestVT, Operation);
2493 }
2494
2495 /// ExpandBSWAP - Open code the operations for BSWAP of the specified operand.
2496 ///
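/// For example, bswap of the i32 value 0x11223344 is built from the four
/// shifted-and-masked copies 0x44000000, 0x00330000, 0x00002200 and
/// 0x00000011, which are OR'd together into 0x44332211.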
2497 SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) {
2498 EVT VT = Op.getValueType();
2499 EVT SHVT = TLI.getShiftAmountTy(VT);
2500 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
2501 switch (VT.getSimpleVT().SimpleTy) {
2502 default: llvm_unreachable("Unhandled Expand type in BSWAP!");
2503 case MVT::i16:
2504 Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT));
2505 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT));
2506 return DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2507 case MVT::i32:
2508 Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT));
2509 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT));
2510 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT));
2511 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT));
2512 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(0xFF0000, VT));
2513 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, VT));
2514 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3);
2515 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1);
2516 return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2);
2517 case MVT::i64:
2518 Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, SHVT));
2519 Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, SHVT));
2520 Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT));
2521 Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT));
2522 Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT));
2523 Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT));
2524 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, SHVT));
2525 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, SHVT));
2526 Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7, DAG.getConstant(255ULL<<48, VT));
2527 Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6, DAG.getConstant(255ULL<<40, VT));
2528 Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5, DAG.getConstant(255ULL<<32, VT));
2529 Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4, DAG.getConstant(255ULL<<24, VT));
2530 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(255ULL<<16, VT));
2531 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(255ULL<<8 , VT));
2532 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7);
2533 Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, Tmp5);
2534 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3);
2535 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1);
2536 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6);
2537 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2);
2538 return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4);
2539 }
2540 }
2541
2542 /// ExpandBitCount - Expand the specified bitcount instruction into operations.
2543 ///
2544 SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
2545 DebugLoc dl) {
2546 switch (Opc) {
2547 default: llvm_unreachable("Cannot expand this yet!");
2548 case ISD::CTPOP: {
2549 EVT VT = Op.getValueType();
2550 EVT ShVT = TLI.getShiftAmountTy(VT);
2551 unsigned Len = VT.getSizeInBits();
2552
2553 assert(VT.isInteger() && Len <= 128 && Len % 8 == 0 &&
2554 "CTPOP not implemented for this type.");
2555
2556 // This is the "best" algorithm from
2557 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
2558
2559 SDValue Mask55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), VT);
2560 SDValue Mask33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), VT);
2561 SDValue Mask0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), VT);
2562 SDValue Mask01 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), VT);
2563
2564 // v = v - ((v >> 1) & 0x55555555...)
2565 Op = DAG.getNode(ISD::SUB, dl, VT, Op,
2566 DAG.getNode(ISD::AND, dl, VT,
2567 DAG.getNode(ISD::SRL, dl, VT, Op,
2568 DAG.getConstant(1, ShVT)),
2569 Mask55));
2570 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
2571 Op = DAG.getNode(ISD::ADD, dl, VT,
2572 DAG.getNode(ISD::AND, dl, VT, Op, Mask33),
2573 DAG.getNode(ISD::AND, dl, VT,
2574 DAG.getNode(ISD::SRL, dl, VT, Op,
2575 DAG.getConstant(2, ShVT)),
2576 Mask33));
2577 // v = (v + (v >> 4)) & 0x0F0F0F0F...
2578 Op = DAG.getNode(ISD::AND, dl, VT,
2579 DAG.getNode(ISD::ADD, dl, VT, Op,
2580 DAG.getNode(ISD::SRL, dl, VT, Op,
2581 DAG.getConstant(4, ShVT))),
2582 Mask0F);
2583 // v = (v * 0x01010101...) >> (Len - 8)
2584 Op = DAG.getNode(ISD::SRL, dl, VT,
2585 DAG.getNode(ISD::MUL, dl, VT, Op, Mask01),
2586 DAG.getConstant(Len - 8, ShVT));
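    // The multiply by 0x0101... sums the per-byte counts into the top byte,
    // and the shift by Len - 8 moves that byte down to give the total
    // population count.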
2587
2588 return Op;
2589 }
2590 case ISD::CTLZ_ZERO_UNDEF:
2591 // This trivially expands to CTLZ.
2592 return DAG.getNode(ISD::CTLZ, dl, Op.getValueType(), Op);
2593 case ISD::CTLZ: {
2594 // for now, we do this:
2595 // x = x | (x >> 1);
2596 // x = x | (x >> 2);
2597 // ...
2598 // x = x | (x >>16);
2599 // x = x | (x >>32); // for 64-bit input
2600 // return popcount(~x);
2601 //
2602 // but see also: http://www.hackersdelight.org/HDcode/nlz.cc
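    // After the OR cascade every bit below the highest set bit is also set, so
    // the number of set bits in ~x equals the number of leading zeros of the
    // original value.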
2603 EVT VT = Op.getValueType();
2604 EVT ShVT = TLI.getShiftAmountTy(VT);
2605 unsigned len = VT.getSizeInBits();
2606 for (unsigned i = 0; (1U << i) <= (len / 2); ++i) {
2607 SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT);
2608 Op = DAG.getNode(ISD::OR, dl, VT, Op,
2609 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3));
2610 }
2611 Op = DAG.getNOT(dl, Op, VT);
2612 return DAG.getNode(ISD::CTPOP, dl, VT, Op);
2613 }
2614 case ISD::CTTZ_ZERO_UNDEF:
2615 // This trivially expands to CTTZ.
2616 return DAG.getNode(ISD::CTTZ, dl, Op.getValueType(), Op);
2617 case ISD::CTTZ: {
2618 // for now, we use: { return popcount(~x & (x - 1)); }
2619 // unless the target has ctlz but not ctpop, in which case we use:
2620 // { return 32 - nlz(~x & (x-1)); }
2621 // see also http://www.hackersdelight.org/HDcode/ntz.cc
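    // ~x & (x - 1) isolates exactly the trailing-zero bits: for x = 0b101000,
    // x - 1 = 0b100111 and ~x & (x - 1) = 0b000111, whose popcount (3) is the
    // number of trailing zeros.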
2622 EVT VT = Op.getValueType();
2623 SDValue Tmp3 = DAG.getNode(ISD::AND, dl, VT,
2624 DAG.getNOT(dl, Op, VT),
2625 DAG.getNode(ISD::SUB, dl, VT, Op,
2626 DAG.getConstant(1, VT)));
2627 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead.
2628 if (!TLI.isOperationLegalOrCustom(ISD::CTPOP, VT) &&
2629 TLI.isOperationLegalOrCustom(ISD::CTLZ, VT))
2630 return DAG.getNode(ISD::SUB, dl, VT,
2631 DAG.getConstant(VT.getSizeInBits(), VT),
2632 DAG.getNode(ISD::CTLZ, dl, VT, Tmp3));
2633 return DAG.getNode(ISD::CTPOP, dl, VT, Tmp3);
2634 }
2635 }
2636 }
2637
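/// ExpandAtomic - Map an atomic node that the target marked Expand onto the
/// __sync_* libcall matching its memory width; for example, an i32
/// ATOMIC_LOAD_ADD is expected to become a call to __sync_fetch_and_add_4.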
2638 std::pair <SDValue, SDValue> SelectionDAGLegalize::ExpandAtomic(SDNode *Node) {
2639 unsigned Opc = Node->getOpcode();
2640 MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT();
2641 RTLIB::Libcall LC;
2642
2643 switch (Opc) {
2644 default:
2645 llvm_unreachable("Unhandled atomic intrinsic Expand!");
2646 case ISD::ATOMIC_SWAP:
2647 switch (VT.SimpleTy) {
2648 default: llvm_unreachable("Unexpected value type for atomic!");
2649 case MVT::i8: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_1; break;
2650 case MVT::i16: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_2; break;
2651 case MVT::i32: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_4; break;
2652 case MVT::i64: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_8; break;
2653 }
2654 break;
2655 case ISD::ATOMIC_CMP_SWAP:
2656 switch (VT.SimpleTy) {
2657 default: llvm_unreachable("Unexpected value type for atomic!");
2658 case MVT::i8: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1; break;
2659 case MVT::i16: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2; break;
2660 case MVT::i32: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4; break;
2661 case MVT::i64: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8; break;
2662 }
2663 break;
2664 case ISD::ATOMIC_LOAD_ADD:
2665 switch (VT.SimpleTy) {
2666 default: llvm_unreachable("Unexpected value type for atomic!");
2667 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_ADD_1; break;
2668 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_ADD_2; break;
2669 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_ADD_4; break;
2670 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_ADD_8; break;
2671 }
2672 break;
2673 case ISD::ATOMIC_LOAD_SUB:
2674 switch (VT.SimpleTy) {
2675 default: llvm_unreachable("Unexpected value type for atomic!");
2676 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_SUB_1; break;
2677 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_SUB_2; break;
2678 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_SUB_4; break;
2679 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_SUB_8; break;
2680 }
2681 break;
2682 case ISD::ATOMIC_LOAD_AND:
2683 switch (VT.SimpleTy) {
2684 default: llvm_unreachable("Unexpected value type for atomic!");
2685 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_AND_1; break;
2686 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_AND_2; break;
2687 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_AND_4; break;
2688 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_AND_8; break;
2689 }
2690 break;
2691 case ISD::ATOMIC_LOAD_OR:
2692 switch (VT.SimpleTy) {
2693 default: llvm_unreachable("Unexpected value type for atomic!");
2694 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_OR_1; break;
2695 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_OR_2; break;
2696 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_OR_4; break;
2697 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_OR_8; break;
2698 }
2699 break;
2700 case ISD::ATOMIC_LOAD_XOR:
2701 switch (VT.SimpleTy) {
2702 default: llvm_unreachable("Unexpected value type for atomic!");
2703 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_XOR_1; break;
2704 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_XOR_2; break;
2705 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_XOR_4; break;
2706 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_XOR_8; break;
2707 }
2708 break;
2709 case ISD::ATOMIC_LOAD_NAND:
2710 switch (VT.SimpleTy) {
2711 default: llvm_unreachable("Unexpected value type for atomic!");
2712 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_NAND_1; break;
2713 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_NAND_2; break;
2714 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_NAND_4; break;
2715 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_NAND_8; break;
2716 }
2717 break;
2718 }
2719
2720 return ExpandChainLibCall(LC, Node, false);
2721 }
2722
2723 void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
2724 SmallVector<SDValue, 8> Results;
2725 DebugLoc dl = Node->getDebugLoc();
2726 SDValue Tmp1, Tmp2, Tmp3, Tmp4;
2727 switch (Node->getOpcode()) {
2728 case ISD::CTPOP:
2729 case ISD::CTLZ:
2730 case ISD::CTLZ_ZERO_UNDEF:
2731 case ISD::CTTZ:
2732 case ISD::CTTZ_ZERO_UNDEF:
2733 Tmp1 = ExpandBitCount(Node->getOpcode(), Node->getOperand(0), dl);
2734 Results.push_back(Tmp1);
2735 break;
2736 case ISD::BSWAP:
2737 Results.push_back(ExpandBSWAP(Node->getOperand(0), dl));
2738 break;
2739 case ISD::FRAMEADDR:
2740 case ISD::RETURNADDR:
2741 case ISD::FRAME_TO_ARGS_OFFSET:
2742 Results.push_back(DAG.getConstant(0, Node->getValueType(0)));
2743 break;
2744 case ISD::FLT_ROUNDS_:
2745 Results.push_back(DAG.getConstant(1, Node->getValueType(0)));
2746 break;
2747 case ISD::EH_RETURN:
2748 case ISD::EH_LABEL:
2749 case ISD::PREFETCH:
2750 case ISD::VAEND:
2751 case ISD::EH_SJLJ_LONGJMP:
2752 // If the target didn't expand these, there's nothing to do, so just
2753 // preserve the chain and be done.
2754 Results.push_back(Node->getOperand(0));
2755 break;
2756 case ISD::EH_SJLJ_SETJMP:
2757 // If the target didn't expand this, just return 'zero' and preserve the
2758 // chain.
2759 Results.push_back(DAG.getConstant(0, MVT::i32));
2760 Results.push_back(Node->getOperand(0));
2761 break;
2762 case ISD::ATOMIC_FENCE:
2763 case ISD::MEMBARRIER: {
2764 // If the target didn't lower this, lower it to '__sync_synchronize()' call
2765 // FIXME: handle "fence singlethread" more efficiently.
2766 TargetLowering::ArgListTy Args;
2767 TargetLowering::
2768 CallLoweringInfo CLI(Node->getOperand(0),
2769 Type::getVoidTy(*DAG.getContext()),
2770 false, false, false, false, 0, CallingConv::C,
2771 /*isTailCall=*/false,
2772 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
2773 DAG.getExternalSymbol("__sync_synchronize",
2774 TLI.getPointerTy()),
2775 Args, DAG, dl);
2776 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
2777
2778 Results.push_back(CallResult.second);
2779 break;
2780 }
2781 case ISD::ATOMIC_LOAD: {
2782 // There is no libcall for atomic load; fake it with ATOMIC_CMP_SWAP.
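     // Illustrative sketch: an atomic load of *Ptr is emulated with a
     // compare-and-swap that can never change memory but still returns the
     // current value:
     //   loaded = ATOMIC_CMP_SWAP(Ptr, /*cmp=*/0, /*new=*/0)
     // i.e. "if (*Ptr == 0) *Ptr = 0;", a no-op store that yields *Ptr.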
2783 SDValue Zero = DAG.getConstant(0, Node->getValueType(0));
2784 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl,
2785 cast<AtomicSDNode>(Node)->getMemoryVT(),
2786 Node->getOperand(0),
2787 Node->getOperand(1), Zero, Zero,
2788 cast<AtomicSDNode>(Node)->getMemOperand(),
2789 cast<AtomicSDNode>(Node)->getOrdering(),
2790 cast<AtomicSDNode>(Node)->getSynchScope());
2791 Results.push_back(Swap.getValue(0));
2792 Results.push_back(Swap.getValue(1));
2793 break;
2794 }
2795 case ISD::ATOMIC_STORE: {
2796 // There is no libcall for atomic store; fake it with ATOMIC_SWAP.
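     // Illustrative sketch: the store of Val to *Ptr becomes
     //   old = ATOMIC_SWAP(Ptr, Val)
     // and the returned 'old' value is discarded; only the chain result of
     // the swap is kept.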
2797 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
2798 cast<AtomicSDNode>(Node)->getMemoryVT(),
2799 Node->getOperand(0),
2800 Node->getOperand(1), Node->getOperand(2),
2801 cast<AtomicSDNode>(Node)->getMemOperand(),
2802 cast<AtomicSDNode>(Node)->getOrdering(),
2803 cast<AtomicSDNode>(Node)->getSynchScope());
2804 Results.push_back(Swap.getValue(1));
2805 break;
2806 }
2807 // By default, atomic intrinsics are marked Legal and lowered. Targets
2808 // which don't support them directly, however, may want libcalls, in which
2809 // case they mark them Expand, and we get here.
2810 case ISD::ATOMIC_SWAP:
2811 case ISD::ATOMIC_LOAD_ADD:
2812 case ISD::ATOMIC_LOAD_SUB:
2813 case ISD::ATOMIC_LOAD_AND:
2814 case ISD::ATOMIC_LOAD_OR:
2815 case ISD::ATOMIC_LOAD_XOR:
2816 case ISD::ATOMIC_LOAD_NAND:
2817 case ISD::ATOMIC_LOAD_MIN:
2818 case ISD::ATOMIC_LOAD_MAX:
2819 case ISD::ATOMIC_LOAD_UMIN:
2820 case ISD::ATOMIC_LOAD_UMAX:
2821 case ISD::ATOMIC_CMP_SWAP: {
2822 std::pair<SDValue, SDValue> Tmp = ExpandAtomic(Node);
2823 Results.push_back(Tmp.first);
2824 Results.push_back(Tmp.second);
2825 break;
2826 }
2827 case ISD::DYNAMIC_STACKALLOC:
2828 ExpandDYNAMIC_STACKALLOC(Node, Results);
2829 break;
2830 case ISD::MERGE_VALUES:
2831 for (unsigned i = 0; i < Node->getNumValues(); i++)
2832 Results.push_back(Node->getOperand(i));
2833 break;
2834 case ISD::UNDEF: {
2835 EVT VT = Node->getValueType(0);
2836 if (VT.isInteger())
2837 Results.push_back(DAG.getConstant(0, VT));
2838 else {
2839 assert(VT.isFloatingPoint() && "Unknown value type!");
2840 Results.push_back(DAG.getConstantFP(0, VT));
2841 }
2842 break;
2843 }
2844 case ISD::TRAP: {
2845 // If this operation is not supported, lower it to an 'abort()' call.
2846 TargetLowering::ArgListTy Args;
2847 TargetLowering::
2848 CallLoweringInfo CLI(Node->getOperand(0),
2849 Type::getVoidTy(*DAG.getContext()),
2850 false, false, false, false, 0, CallingConv::C,
2851 /*isTailCall=*/false,
2852 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
2853 DAG.getExternalSymbol("abort", TLI.getPointerTy()),
2854 Args, DAG, dl);
2855 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
2856
2857 Results.push_back(CallResult.second);
2858 break;
2859 }
2860 case ISD::FP_ROUND:
2861 case ISD::BITCAST:
2862 Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0),
2863 Node->getValueType(0), dl);
2864 Results.push_back(Tmp1);
2865 break;
2866 case ISD::FP_EXTEND:
2867 Tmp1 = EmitStackConvert(Node->getOperand(0),
2868 Node->getOperand(0).getValueType(),
2869 Node->getValueType(0), dl);
2870 Results.push_back(Tmp1);
2871 break;
2872 case ISD::SIGN_EXTEND_INREG: {
2873 // NOTE: we could fall back on load/store here too for targets without
2874 // SAR. However, it is doubtful that any exist.
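     // For example, sign-extending the low 8 bits of an i32 in-register is
     // expanded roughly as
     //   x = (x << 24); x = (x >>s 24);   // shl/sra by 32 - 8 bits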
2875 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
2876 EVT VT = Node->getValueType(0);
2877 EVT ShiftAmountTy = TLI.getShiftAmountTy(VT);
2878 if (VT.isVector())
2879 ShiftAmountTy = VT;
2880 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
2881 ExtraVT.getScalarType().getSizeInBits();
2882 SDValue ShiftCst = DAG.getConstant(BitsDiff, ShiftAmountTy);
2883 Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0),
2884 Node->getOperand(0), ShiftCst);
2885 Tmp1 = DAG.getNode(ISD::SRA, dl, Node->getValueType(0), Tmp1, ShiftCst);
2886 Results.push_back(Tmp1);
2887 break;
2888 }
2889 case ISD::FP_ROUND_INREG: {
2890 // The only way we can lower this is to turn it into a TRUNCSTORE,
2891 // EXTLOAD pair, targeting a temporary location (a stack slot).
2892
2893 // NOTE: there is a choice here between constantly creating new stack
2894 // slots and always reusing the same one. We currently always create
2895 // new ones, as reuse may inhibit scheduling.
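     // For example, rounding an f64 value in-register to f32 precision is
     // done by truncating-storing it as f32 to a stack temporary and then
     // extending-loading it back as f64.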
2896 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
2897 Tmp1 = EmitStackConvert(Node->getOperand(0), ExtraVT,
2898 Node->getValueType(0), dl);
2899 Results.push_back(Tmp1);
2900 break;
2901 }
2902 case ISD::SINT_TO_FP:
2903 case ISD::UINT_TO_FP:
2904 Tmp1 = ExpandLegalINT_TO_FP(Node->getOpcode() == ISD::SINT_TO_FP,
2905 Node->getOperand(0), Node->getValueType(0), dl);
2906 Results.push_back(Tmp1);
2907 break;
2908 case ISD::FP_TO_UINT: {
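     // Sketch of the expansion, where N is the width of the result type: if
     // the input is below 2^(N-1) it fits in a signed conversion, so use
     // FP_TO_SINT directly; otherwise subtract 2^(N-1) first, convert, and
     // xor the sign bit back in:
     //   x < 2^(N-1) ? fptosi(x) : fptosi(x - 2^(N-1)) ^ (1 << (N-1))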
2909 SDValue True, False;
2910 EVT VT = Node->getOperand(0).getValueType();
2911 EVT NVT = Node->getValueType(0);
2912 APFloat apf(DAG.EVTToAPFloatSemantics(VT),
2913 APInt::getNullValue(VT.getSizeInBits()));
2914 APInt x = APInt::getSignBit(NVT.getSizeInBits());
2915 (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven);
2916 Tmp1 = DAG.getConstantFP(apf, VT);
2917 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(VT),
2918 Node->getOperand(0),
2919 Tmp1, ISD::SETLT);
2920 True = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, Node->getOperand(0));
2921 False = DAG.getNode(ISD::FP_TO_SINT, dl, NVT,
2922 DAG.getNode(ISD::FSUB, dl, VT,
2923 Node->getOperand(0), Tmp1));
2924 False = DAG.getNode(ISD::XOR, dl, NVT, False,
2925 DAG.getConstant(x, NVT));
2926 Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2, True, False);
2927 Results.push_back(Tmp1);
2928 break;
2929 }
2930 case ISD::VAARG: {
2931 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2932 EVT VT = Node->getValueType(0);
2933 Tmp1 = Node->getOperand(0);
2934 Tmp2 = Node->getOperand(1);
2935 unsigned Align = Node->getConstantOperandVal(3);
2936
2937 SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2,
2938 MachinePointerInfo(V),
2939 false, false, false, 0);
2940 SDValue VAList = VAListLoad;
2941
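     // If the argument needs more alignment than the minimum stack argument
     // alignment, round the va_list pointer up to a multiple of Align:
     //   VAList = (VAList + Align - 1) & -Align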
2942 if (Align > TLI.getMinStackArgumentAlignment()) {
2943 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");
2944
2945 VAList = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
2946 DAG.getConstant(Align - 1,
2947 TLI.getPointerTy()));
2948
2949 VAList = DAG.getNode(ISD::AND, dl, TLI.getPointerTy(), VAList,
2950 DAG.getConstant(-(int64_t)Align,
2951 TLI.getPointerTy()));
2952 }
2953
2954 // Increment the pointer, VAList, to the next vaarg
2955 Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
2956 DAG.getConstant(TLI.getDataLayout()->
2957 getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
2958 TLI.getPointerTy()));
2959 // Store the incremented VAList to the legalized pointer
2960 Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2,
2961 MachinePointerInfo(V), false, false, 0);
2962 // Load the actual argument out of the pointer VAList
2963 Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
2964 false, false, false, 0));
2965 Results.push_back(Results[0].getValue(1));
2966 break;
2967 }
2968 case ISD::VACOPY: {
2969 // This defaults to loading a pointer from the input and storing it to the
2970 // output, returning the chain.
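     // For targets whose va_list is a simple pointer this amounts to
     // "*Dest = *Src", with the load and store threaded through the chain.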
2971 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
2972 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
2973 Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0),
2974 Node->getOperand(2), MachinePointerInfo(VS),
2975 false, false, false, 0);
2976 Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
2977 MachinePointerInfo(VD), false, false, 0);
2978 Results.push_back(Tmp1);
2979 break;
2980 }
2981 case ISD::EXTRACT_VECTOR_ELT:
2982 if (Node->getOperand(0).getValueType().getVectorNumElements() == 1)
2983 // This must be an access of the only element. Return it.
2984 Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0),
2985 Node->getOperand(0));
2986 else
2987 Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0));
2988 Results.push_back(Tmp1);
2989 break;
2990 case ISD::EXTRACT_SUBVECTOR:
2991 Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0)));
2992 break;
2993 case ISD::INSERT_SUBVECTOR:
2994 Results.push_back(ExpandInsertToVectorThroughStack(SDValue(Node, 0)));
2995 break;
2996 case ISD::CONCAT_VECTORS: {
2997 Results.push_back(ExpandVectorBuildThroughStack(Node));
2998 break;
2999 }
3000 case ISD::SCALAR_TO_VECTOR:
3001 Results.push_back(ExpandSCALAR_TO_VECTOR(Node));
3002 break;
3003 case ISD::INSERT_VECTOR_ELT:
3004 Results.push_back(ExpandINSERT_VECTOR_ELT(Node->getOperand(0),
3005 Node->getOperand(1),
3006 Node->getOperand(2), dl));
3007 break;
3008 case ISD::VECTOR_SHUFFLE: {
3009 SmallVector<int, 32> NewMask;
3010 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Node)->getMask();
3011
3012 EVT VT = Node->getValueType(0);
3013 EVT EltVT = VT.getVectorElementType();
3014 SDValue Op0 = Node->getOperand(0);
3015 SDValue Op1 = Node->getOperand(1);
3016 if (!TLI.isTypeLegal(EltVT)) {
3017
3018 EVT NewEltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT);
3019
3020 // BUILD_VECTOR operands are allowed to be wider than the element type.
3021 // But if NewEltVT is smaller than EltVT, the BUILD_VECTOR does not accept it.
3022 if (NewEltVT.bitsLT(EltVT)) {
3023
3024 // Convert shuffle node.
3025 // If original node was v4i64 and the new EltVT is i32,
3026 // cast operands to v8i32 and re-build the mask.
3027
3028 // Calculate the new VT; its size should equal that of the original VT.
3029 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), NewEltVT,
3030 VT.getSizeInBits()/NewEltVT.getSizeInBits());
3031 assert(NewVT.bitsEq(VT));
3032
3033 // cast operands to new VT
3034 Op0 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op0);
3035 Op1 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op1);
3036
3037 // Convert the shuffle mask
3038 unsigned int factor = NewVT.getVectorNumElements()/VT.getVectorNumElements();
3039
3040 // EltVT gets smaller
3041 assert(factor > 0);
3042
3043 for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
3044 if (Mask[i] < 0) {
3045 for (unsigned fi = 0; fi < factor; ++fi)
3046 NewMask.push_back(Mask[i]);
3047 }
3048 else {
3049 for (unsigned fi = 0; fi < factor; ++fi)
3050 NewMask.push_back(Mask[i]*factor+fi);
3051 }
3052 }
3053 Mask = NewMask;
3054 VT = NewVT;
3055 }
3056 EltVT = NewEltVT;
3057 }
3058 unsigned NumElems = VT.getVectorNumElements();
3059 SmallVector<SDValue, 16> Ops;
3060 for (unsigned i = 0; i != NumElems; ++i) {
3061 if (Mask[i] < 0) {
3062 Ops.push_back(DAG.getUNDEF(EltVT));
3063 continue;
3064 }
3065 unsigned Idx = Mask[i];
3066 if (Idx < NumElems)
3067 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
3068 Op0,
3069 DAG.getIntPtrConstant(Idx)));
3070 else
3071 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
3072 Op1,
3073 DAG.getIntPtrConstant(Idx - NumElems)));
3074 }
3075
3076 Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
3077 // We may have changed the BUILD_VECTOR type. Cast it back to the Node type.
3078 Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), Tmp1);
3079 Results.push_back(Tmp1);
3080 break;
3081 }
3082 case ISD::EXTRACT_ELEMENT: {
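     // EXTRACT_ELEMENT of a 2*N-bit value: index 1 selects the high N bits
     // (shift right by N, then truncate), index 0 the low N bits (plain
     // truncate).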
3083 EVT OpTy = Node->getOperand(0).getValueType();
3084 if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) {
3085 // 1 -> Hi
3086 Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0),
3087 DAG.getConstant(OpTy.getSizeInBits()/2,
3088 TLI.getShiftAmountTy(Node->getOperand(0).getValueType())));
3089 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1);
3090 } else {
3091 // 0 -> Lo
3092 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0),
3093 Node->getOperand(0));
3094 }
3095 Results.push_back(Tmp1);
3096 break;
3097 }
3098 case ISD::STACKSAVE:
3099 // Expand to CopyFromReg if the target set
3100 // StackPointerRegisterToSaveRestore.
3101 if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
3102 Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP,
3103 Node->getValueType(0)));
3104 Results.push_back(Results[0].getValue(1));
3105 } else {
3106 Results.push_back(DAG.getUNDEF(Node->getValueType(0)));
3107 Results.push_back(Node->getOperand(0));
3108 }
3109 break;
3110 case ISD::STACKRESTORE:
3111 // Expand to CopyToReg if the target set
3112 // StackPointerRegisterToSaveRestore.
3113 if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
3114 Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP,
3115 Node->getOperand(1)));
3116 } else {
3117 Results.push_back(Node->getOperand(0));
3118 }
3119 break;
3120 case ISD::FCOPYSIGN:
3121 Results.push_back(ExpandFCOPYSIGN(Node));
3122 break;
3123 case ISD::FNEG:
3124 // Expand Y = FNEG(X) -> Y = SUB -0.0, X
3125 Tmp1 = DAG.getConstantFP(-0.0, Node->getValueType(0));
3126 Tmp1 = DAG.getNode(ISD::FSUB, dl, Node->getValueType(0), Tmp1,
3127 Node->getOperand(0));
3128 Results.push_back(Tmp1);
3129 break;
3130 case ISD::FABS: {
3131 // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X).
3132 EVT VT = Node->getValueType(0);
3133 Tmp1 = Node->getOperand(0);
3134 Tmp2 = DAG.getConstantFP(0.0, VT);
3135 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(Tmp1.getValueType()),
3136 Tmp1, Tmp2, ISD::SETUGT);
3137 Tmp3 = DAG.getNode(ISD::FNEG, dl, VT, Tmp1);
3138 Tmp1 = DAG.getNode(ISD::SELECT, dl, VT, Tmp2, Tmp1, Tmp3);
3139 Results.push_back(Tmp1);
3140 break;
3141 }
3142 case ISD::FSQRT:
3143 Results.push_back(ExpandFPLibCall(Node, RTLIB::SQRT_F32, RTLIB::SQRT_F64,
3144 RTLIB::SQRT_F80, RTLIB::SQRT_F128,
3145 RTLIB::SQRT_PPCF128));
3146 break;
3147 case ISD::FSIN:
3148 case ISD::FCOS: {
3149 EVT VT = Node->getValueType(0);
3150 bool isSIN = Node->getOpcode() == ISD::FSIN;
3151 // Turn fsin / fcos into an ISD::FSINCOS node if there is a pair of fsin /
3152 // fcos nodes that share the same operand and both are used.
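     // E.g. for "s = sinf(x); c = cosf(x);" both results can come from a
     // single FSINCOS node (or one sincos libcall) instead of two separate
     // libcalls.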
3153 if ((TLI.isOperationLegalOrCustom(ISD::FSINCOS, VT) ||
3154 canCombineSinCosLibcall(Node, TLI, TM))
3155 && useSinCos(Node)) {
3156 SDVTList VTs = DAG.getVTList(VT, VT);
3157 Tmp1 = DAG.getNode(ISD::FSINCOS, dl, VTs, Node->getOperand(0));
3158 if (!isSIN)
3159 Tmp1 = Tmp1.getValue(1);
3160 Results.push_back(Tmp1);
3161 } else if (isSIN) {
3162 Results.push_back(ExpandFPLibCall(Node, RTLIB::SIN_F32, RTLIB::SIN_F64,
3163 RTLIB::SIN_F80, RTLIB::SIN_F128,
3164 RTLIB::SIN_PPCF128));
3165 } else {
3166 Results.push_back(ExpandFPLibCall(Node, RTLIB::COS_F32, RTLIB::COS_F64,
3167 RTLIB::COS_F80, RTLIB::COS_F128,
3168 RTLIB::COS_PPCF128));
3169 }
3170 break;
3171 }
3172 case ISD::FSINCOS:
3173 // Expand into sincos libcall.
3174 ExpandSinCosLibCall(Node, Results);
3175 break;
3176 case ISD::FLOG:
3177 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG_F32, RTLIB::LOG_F64,
3178 RTLIB::LOG_F80, RTLIB::LOG_F128,
3179 RTLIB::LOG_PPCF128));
3180 break;
3181 case ISD::FLOG2:
3182 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG2_F32, RTLIB::LOG2_F64,
3183 RTLIB::LOG2_F80, RTLIB::LOG2_F128,
3184 RTLIB::LOG2_PPCF128));
3185 break;
3186 case ISD::FLOG10:
3187 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG10_F32, RTLIB::LOG10_F64,
3188 RTLIB::LOG10_F80, RTLIB::LOG10_F128,
3189 RTLIB::LOG10_PPCF128));
3190 break;
3191 case ISD::FEXP:
3192 Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP_F32, RTLIB::EXP_F64,
3193 RTLIB::EXP_F80, RTLIB::EXP_F128,
3194 RTLIB::EXP_PPCF128));
3195 break;
3196 case ISD::FEXP2:
3197 Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP2_F32, RTLIB::EXP2_F64,
3198 RTLIB::EXP2_F80, RTLIB::EXP2_F128,
3199 RTLIB::EXP2_PPCF128));
3200 break;
3201 case ISD::FTRUNC:
3202 Results.push_back(ExpandFPLibCall(Node, RTLIB::TRUNC_F32, RTLIB::TRUNC_F64,
3203 RTLIB::TRUNC_F80, RTLIB::TRUNC_F128,
3204 RTLIB::TRUNC_PPCF128));
3205 break;
3206 case ISD::FFLOOR:
3207 Results.push_back(ExpandFPLibCall(Node, RTLIB::FLOOR_F32, RTLIB::FLOOR_F64,
3208 RTLIB::FLOOR_F80, RTLIB::FLOOR_F128,
3209 RTLIB::FLOOR_PPCF128));
3210 break;
3211 case ISD::FCEIL:
3212 Results.push_back(ExpandFPLibCall(Node, RTLIB::CEIL_F32, RTLIB::CEIL_F64,
3213 RTLIB::CEIL_F80, RTLIB::CEIL_F128,
3214 RTLIB::CEIL_PPCF128));
3215 break;
3216 case ISD::FRINT:
3217 Results.push_back(ExpandFPLibCall(Node, RTLIB::RINT_F32, RTLIB::RINT_F64,
3218 RTLIB::RINT_F80, RTLIB::RINT_F128,
3219 RTLIB::RINT_PPCF128));
3220 break;
3221 case ISD::FNEARBYINT:
3222 Results.push_back(ExpandFPLibCall(Node, RTLIB::NEARBYINT_F32,
3223 RTLIB::NEARBYINT_F64,
3224 RTLIB::NEARBYINT_F80,
3225 RTLIB::NEARBYINT_F128,
3226 RTLIB::NEARBYINT_PPCF128));
3227 break;
3228 case ISD::FPOWI:
3229 Results.push_back(ExpandFPLibCall(Node, RTLIB::POWI_F32, RTLIB::POWI_F64,
3230 RTLIB::POWI_F80, RTLIB::POWI_F128,
3231 RTLIB::POWI_PPCF128));
3232 break;
3233 case ISD::FPOW:
3234 Results.push_back(ExpandFPLibCall(Node, RTLIB::POW_F32, RTLIB::POW_F64,
3235 RTLIB::POW_F80, RTLIB::POW_F128,
3236 RTLIB::POW_PPCF128));
3237 break;
3238 case ISD::FDIV:
3239 Results.push_back(ExpandFPLibCall(Node, RTLIB::DIV_F32, RTLIB::DIV_F64,
3240 RTLIB::DIV_F80, RTLIB::DIV_F128,
3241 RTLIB::DIV_PPCF128));
3242 break;
3243 case ISD::FREM:
3244 Results.push_back(ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64,
3245 RTLIB::REM_F80, RTLIB::REM_F128,
3246 RTLIB::REM_PPCF128));
3247 break;
3248 case ISD::FMA:
3249 Results.push_back(ExpandFPLibCall(Node, RTLIB::FMA_F32, RTLIB::FMA_F64,
3250 RTLIB::FMA_F80, RTLIB::FMA_F128,
3251 RTLIB::FMA_PPCF128));
3252 break;
3253 case ISD::FP16_TO_FP32:
3254 Results.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32, Node, false));
3255 break;
3256 case ISD::FP32_TO_FP16:
3257 Results.push_back(ExpandLibCall(RTLIB::FPROUND_F32_F16, Node, false));
3258 break;
3259 case ISD::ConstantFP: {
3260 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node);
3261 // Check to see if this FP immediate is already legal.
3262 // If this is a legal constant, turn it into a TargetConstantFP node.
3263 if (!TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0)))
3264 Results.push_back(ExpandConstantFP(CFP, true));
3265 break;
3266 }
3267 case ISD::EHSELECTION: {
3268 unsigned Reg = TLI.getExceptionSelectorRegister();
3269 assert(Reg && "Can't expand to unknown register!");
3270 Results.push_back(DAG.getCopyFromReg(Node->getOperand(1), dl, Reg,
3271 Node->getValueType(0)));
3272 Results.push_back(Results[0].getValue(1));
3273 break;
3274 }
3275 case ISD::EXCEPTIONADDR: {
3276 unsigned Reg = TLI.getExceptionPointerRegister();
3277 assert(Reg && "Can't expand to unknown register!");
3278 Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, Reg,
3279 Node->getValueType(0)));
3280 Results.push_back(Results[0].getValue(1));
3281 break;
3282 }
3283 case ISD::FSUB: {
3284 EVT VT = Node->getValueType(0);
3285 assert(TLI.isOperationLegalOrCustom(ISD::FADD, VT) &&
3286 TLI.isOperationLegalOrCustom(ISD::FNEG, VT) &&
3287 "Don't know how to expand this FP subtraction!");
3288 Tmp1 = DAG.getNode(ISD::FNEG, dl, VT, Node->getOperand(1));
3289 Tmp1 = DAG.getNode(ISD::FADD, dl, VT, Node->getOperand(0), Tmp1);
3290 Results.push_back(Tmp1);
3291 break;
3292 }
3293 case ISD::SUB: {
3294 EVT VT = Node->getValueType(0);
3295 assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) &&
3296 TLI.isOperationLegalOrCustom(ISD::XOR, VT) &&
3297 "Don't know how to expand this subtraction!");
3298 Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1),
3299 DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT));
3300 Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, VT));
3301 Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1));
3302 break;
3303 }
3304 case ISD::UREM:
3305 case ISD::SREM: {
3306 EVT VT = Node->getValueType(0);
3307 bool isSigned = Node->getOpcode() == ISD::SREM;
3308 unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
3309 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
3310 Tmp2 = Node->getOperand(0);
3311 Tmp3 = Node->getOperand(1);
3312 if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
3313 (isDivRemLibcallAvailable(Node, isSigned, TLI) &&
3314 // If div is legal, it's better to do the normal expansion
3315 !TLI.isOperationLegalOrCustom(DivOpc, Node->getValueType(0)) &&
3316 useDivRem(Node, isSigned, false))) {
3317 SDVTList VTs = DAG.getVTList(VT, VT);
3318 Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1);
3319 } else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) {
3320 // X % Y -> X-X/Y*Y
3321 Tmp1 = DAG.getNode(DivOpc, dl, VT, Tmp2, Tmp3);
3322 Tmp1 = DAG.getNode(ISD::MUL, dl, VT, Tmp1, Tmp3);
3323 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, Tmp2, Tmp1);
3324 } else if (isSigned)
3325 Tmp1 = ExpandIntLibCall(Node, true,
3326 RTLIB::SREM_I8,
3327 RTLIB::SREM_I16, RTLIB::SREM_I32,
3328 RTLIB::SREM_I64, RTLIB::SREM_I128);
3329 else
3330 Tmp1 = ExpandIntLibCall(Node, false,
3331 RTLIB::UREM_I8,
3332 RTLIB::UREM_I16, RTLIB::UREM_I32,
3333 RTLIB::UREM_I64, RTLIB::UREM_I128);
3334 Results.push_back(Tmp1);
3335 break;
3336 }
3337 case ISD::UDIV:
3338 case ISD::SDIV: {
3339 bool isSigned = Node->getOpcode() == ISD::SDIV;
3340 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
3341 EVT VT = Node->getValueType(0);
3342 SDVTList VTs = DAG.getVTList(VT, VT);
3343 if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
3344 (isDivRemLibcallAvailable(Node, isSigned, TLI) &&
3345 useDivRem(Node, isSigned, true)))
3346 Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0),
3347 Node->getOperand(1));
3348 else if (isSigned)
3349 Tmp1 = ExpandIntLibCall(Node, true,
3350 RTLIB::SDIV_I8,
3351 RTLIB::SDIV_I16, RTLIB::SDIV_I32,
3352 RTLIB::SDIV_I64, RTLIB::SDIV_I128);
3353 else
3354 Tmp1 = ExpandIntLibCall(Node, false,
3355 RTLIB::UDIV_I8,
3356 RTLIB::UDIV_I16, RTLIB::UDIV_I32,
3357 RTLIB::UDIV_I64, RTLIB::UDIV_I128);
3358 Results.push_back(Tmp1);
3359 break;
3360 }
3361 case ISD::MULHU:
3362 case ISD::MULHS: {
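     // MULH only needs the high half of the double-width product, so expand
     // it via the corresponding two-result [SU]MUL_LOHI node and take result
     // number 1 (the high half).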
3363 unsigned ExpandOpcode = Node->getOpcode() == ISD::MULHU ? ISD::UMUL_LOHI :
3364 ISD::SMUL_LOHI;
3365 EVT VT = Node->getValueType(0);
3366 SDVTList VTs = DAG.getVTList(VT, VT);
3367 assert(TLI.isOperationLegalOrCustom(ExpandOpcode, VT) &&
3368 "If this wasn't legal, it shouldn't have been created!");
3369 Tmp1 = DAG.getNode(ExpandOpcode, dl, VTs, Node->getOperand(0),
3370 Node->getOperand(1));
3371 Results.push_back(Tmp1.getValue(1));
3372 break;
3373 }
3374 case ISD::SDIVREM:
3375 case ISD::UDIVREM:
3376 // Expand into divrem libcall
3377 ExpandDivRemLibCall(Node, Results);
3378 break;
3379 case ISD::MUL: {
3380 EVT VT = Node->getValueType(0);
3381 SDVTList VTs = DAG.getVTList(VT, VT);
3382 // See if the multiply can be lowered using a two-result operation.
3383 // We just need the low half of the multiply; try both the signed
3384 // and unsigned forms. If the target supports both SMUL_LOHI and
3385 // UMUL_LOHI, form a preference by checking which forms of plain
3386 // MULH it supports.
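     // E.g. on a target that only has UMUL_LOHI, "a * b" becomes
     //   lo, hi = UMUL_LOHI(a, b)
     // and only 'lo' is used as the result of the MUL.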
3387 bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, VT);
3388 bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, VT);
3389 bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, VT);
3390 bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, VT);
3391 unsigned OpToUse = 0;
3392 if (HasSMUL_LOHI && !HasMULHS) {
3393 OpToUse = ISD::SMUL_LOHI;
3394 } else if (HasUMUL_LOHI && !HasMULHU) {
3395 OpToUse = ISD::UMUL_LOHI;
3396 } else if (HasSMUL_LOHI) {
3397 OpToUse = ISD::SMUL_LOHI;
3398 } else if (HasUMUL_LOHI) {
3399 OpToUse = ISD::UMUL_LOHI;
3400 }
3401 if (OpToUse) {
3402 Results.push_back(DAG.getNode(OpToUse, dl, VTs, Node->getOperand(0),
3403 Node->getOperand(1)));
3404 break;
3405 }
3406 Tmp1 = ExpandIntLibCall(Node, false,
3407 RTLIB::MUL_I8,
3408 RTLIB::MUL_I16, RTLIB::MUL_I32,
3409 RTLIB::MUL_I64, RTLIB::MUL_I128);
3410 Results.push_back(Tmp1);
3411 break;
3412 }
3413 case ISD::SADDO:
3414 case ISD::SSUBO: {
3415 SDValue LHS = Node->getOperand(0);
3416 SDValue RHS = Node->getOperand(1);
3417 SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ?
3418 ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
3419 LHS, RHS);
3420 Results.push_back(Sum);
3421 EVT OType = Node->getValueType(1);
3422
3423 SDValue Zero = DAG.getConstant(0, LHS.getValueType());
3424
3425 // LHSSign -> LHS >= 0
3426 // RHSSign -> RHS >= 0
3427 // SumSign -> Sum >= 0
3428 //
3429 // Add:
3430 // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
3431 // Sub:
3432 // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
3433 //
3434 SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE);
3435 SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE);
3436 SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign,
3437 Node->getOpcode() == ISD::SADDO ?
3438 ISD::SETEQ : ISD::SETNE);
3439
3440 SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE);
3441 SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE);
3442
3443 SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE);
3444 Results.push_back(Cmp);
3445 break;
3446 }
3447 case ISD::UADDO:
3448 case ISD::USUBO: {
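     // Unsigned overflow can be read off the result itself: for an add,
     // overflow occurred iff Sum < LHS; for a sub, iff Sum > LHS (the
     // subtraction wrapped).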
3449 SDValue LHS = Node->getOperand(0);
3450 SDValue RHS = Node->getOperand(1);
3451 SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::UADDO ?
3452 ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
3453 LHS, RHS);
3454 Results.push_back(Sum);
3455 Results.push_back(DAG.getSetCC(dl, Node->getValueType(1), Sum, LHS,
3456 Node->getOpcode () == ISD::UADDO ?
3457 ISD::SETULT : ISD::SETUGT));
3458 break;
3459 }
3460 case ISD::UMULO:
3461 case ISD::SMULO: {
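     // General strategy: compute the full double-width product, then report
     // overflow if the high half is not simply the sign (or zero) extension
     // of the low half.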
3462 EVT VT = Node->getValueType(0);
3463 EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2);
3464 SDValue LHS = Node->getOperand(0);
3465 SDValue RHS = Node->getOperand(1);
3466 SDValue BottomHalf;
3467 SDValue TopHalf;
3468 static const unsigned Ops[2][3] =
3469 { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
3470 { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
3471 bool isSigned = Node->getOpcode() == ISD::SMULO;
3472 if (TLI.isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
3473 BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
3474 TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
3475 } else if (TLI.isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
3476 BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
3477 RHS);
3478 TopHalf = BottomHalf.getValue(1);
3479 } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(),
3480 VT.getSizeInBits() * 2))) {
3481 LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
3482 RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
3483 Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
3484 BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
3485 DAG.getIntPtrConstant(0));
3486 TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
3487 DAG.getIntPtrConstant(1));
3488 } else {
3489 // We can fall back to a libcall with an illegal type for the MUL if we
3490 // have a libcall big enough.
3491 // Also, we can fall back to a division in some cases, but that's a big
3492 // performance hit in the general case.
3493 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
3494 if (WideVT == MVT::i16)
3495 LC = RTLIB::MUL_I16;
3496 else if (WideVT == MVT::i32)
3497 LC = RTLIB::MUL_I32;
3498 else if (WideVT == MVT::i64)
3499 LC = RTLIB::MUL_I64;
3500 else if (WideVT == MVT::i128)
3501 LC = RTLIB::MUL_I128;
3502 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");
3503
3504 // The high part is obtained by shifting the low part right
3505 // arithmetically by all but one of its bits, i.e. by sign-extending it.
3506 unsigned LoSize = VT.getSizeInBits();
3507 SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, RHS,
3508 DAG.getConstant(LoSize-1, TLI.getPointerTy()));
3509 SDValue HiRHS = DAG.getNode(ISD::SRA, dl, VT, LHS,
3510 DAG.getConstant(LoSize-1, TLI.getPointerTy()));
3511
3512 // Here we're passing the 2 arguments explicitly as 4 arguments that are
3513 // pre-lowered to the correct types. This all depends upon WideVT not
3514 // being a legal type for the architecture and thus having to be split
3515 // into two arguments.
3516 SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
3517 SDValue Ret = ExpandLibCall(LC, WideVT, Args, 4, isSigned, dl);
3518 BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
3519 DAG.getIntPtrConstant(0));
3520 TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
3521 DAG.getIntPtrConstant(1));
3522 // Ret is a node with an illegal type. Because such things are not
3523 // generally permitted during this phase of legalization, delete the
3524 // node. The above EXTRACT_ELEMENT nodes should have been folded.
3525 DAG.DeleteNode(Ret.getNode());
3526 }
3527
3528 if (isSigned) {
3529 Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1,
3530 TLI.getShiftAmountTy(BottomHalf.getValueType()));
3531 Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1);
3532 TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, Tmp1,
3533 ISD::SETNE);
3534 } else {
3535 TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf,
3536 DAG.getConstant(0, VT), ISD::SETNE);
3537 }
3538 Results.push_back(BottomHalf);
3539 Results.push_back(TopHalf);
3540 break;
3541 }
3542 case ISD::BUILD_PAIR: {
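     // BUILD_PAIR(Lo, Hi) is expanded as zext(Lo) | (anyext(Hi) << Half),
     // where Half is half the bit width of the result type.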
3543 EVT PairTy = Node->getValueType(0);
3544 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0));
3545 Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1));
3546 Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2,
3547 DAG.getConstant(PairTy.getSizeInBits()/2,
3548 TLI.getShiftAmountTy(PairTy)));
3549 Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2));
3550 break;
3551 }
3552 case ISD::SELECT:
3553 Tmp1 = Node->getOperand(0);
3554 Tmp2 = Node->getOperand(1);
3555 Tmp3 = Node->getOperand(2);
3556 if (Tmp1.getOpcode() == ISD::SETCC) {
3557 Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1),
3558 Tmp2, Tmp3,
3559 cast<CondCodeSDNode>(Tmp1.getOperand(2))->get());
3560 } else {
3561 Tmp1 = DAG.getSelectCC(dl, Tmp1,
3562 DAG.getConstant(0, Tmp1.getValueType()),
3563 Tmp2, Tmp3, ISD::SETNE);
3564 }
3565 Results.push_back(Tmp1);
3566 break;
3567 case ISD::BR_JT: {
3568 SDValue Chain = Node->getOperand(0);
3569 SDValue Table = Node->getOperand(1);
3570 SDValue Index = Node->getOperand(2);
3571
3572 EVT PTy = TLI.getPointerTy();
3573
3574 const DataLayout &TD = *TLI.getDataLayout();
3575 unsigned EntrySize =
3576 DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);
3577
3578 Index = DAG.getNode(ISD::MUL, dl, PTy,
3579 Index, DAG.getConstant(EntrySize, PTy));
3580 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
3581
3582 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
3583 SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr,
3584 MachinePointerInfo::getJumpTable(), MemVT,
3585 false, false, 0);
3586 Addr = LD;
3587 if (TM.getRelocationModel() == Reloc::PIC_) {
3588 // For PIC, the sequence is:
3589 // BRIND(load(Jumptable + index) + RelocBase)
3590 // RelocBase can be JumpTable, GOT or some sort of global base.
3591 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr,
3592 TLI.getPICJumpTableRelocBase(Table, DAG));
3593 }
3594 Tmp1 = DAG.getNode(ISD::BRIND, dl, MVT::Other, LD.getValue(1), Addr);
3595 Results.push_back(Tmp1);
3596 break;
3597 }
3598 case ISD::BRCOND:
3599 // Expand brcond's setcc into its constituent parts and create a BR_CC
3600 // Node.
3601 Tmp1 = Node->getOperand(0);
3602 Tmp2 = Node->getOperand(1);
3603 if (Tmp2.getOpcode() == ISD::SETCC) {
3604 Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other,
3605 Tmp1, Tmp2.getOperand(2),
3606 Tmp2.getOperand(0), Tmp2.getOperand(1),
3607 Node->getOperand(2));
3608 } else {
3609 // We test only the i1 bit. Skip the AND if UNDEF.
3610 Tmp3 = (Tmp2.getOpcode() == ISD::UNDEF) ? Tmp2 :
3611 DAG.getNode(ISD::AND, dl, Tmp2.getValueType(), Tmp2,
3612 DAG.getConstant(1, Tmp2.getValueType()));
3613 Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1,
3614 DAG.getCondCode(ISD::SETNE), Tmp3,
3615 DAG.getConstant(0, Tmp3.getValueType()),
3616 Node->getOperand(2));
3617 }
3618 Results.push_back(Tmp1);
3619 break;
3620 case ISD::SETCC: {
3621 Tmp1 = Node->getOperand(0);
3622 Tmp2 = Node->getOperand(1);
3623 Tmp3 = Node->getOperand(2);
3624 LegalizeSetCCCondCode(Node->getValueType(0), Tmp1, Tmp2, Tmp3, dl);
3625
3626 // If we expanded the SETCC into an AND/OR, return the new node
3627 if (Tmp2.getNode() == 0) {
3628 Results.push_back(Tmp1);
3629 break;
3630 }
3631
3632 // Otherwise, SETCC for the given comparison type must be completely
3633 // illegal; expand it into a SELECT_CC.
3634 EVT VT = Node->getValueType(0);
3635 int TrueValue;
3636 switch (TLI.getBooleanContents(VT.isVector())) {
3637 case TargetLowering::ZeroOrOneBooleanContent:
3638 case TargetLowering::UndefinedBooleanContent:
3639 TrueValue = 1;
3640 break;
3641 case TargetLowering::ZeroOrNegativeOneBooleanContent:
3642 TrueValue = -1;
3643 break;
3644 }
3645 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2,
3646 DAG.getConstant(TrueValue, VT), DAG.getConstant(0, VT),
3647 Tmp3);
3648 Results.push_back(Tmp1);
3649 break;
3650 }
3651 case ISD::SELECT_CC: {
3652 Tmp1 = Node->getOperand(0); // LHS
3653 Tmp2 = Node->getOperand(1); // RHS
3654 Tmp3 = Node->getOperand(2); // True
3655 Tmp4 = Node->getOperand(3); // False
3656 SDValue CC = Node->getOperand(4);
3657
3658 LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp1.getValueType()),
3659 Tmp1, Tmp2, CC, dl);
3660
3661 assert(!Tmp2.getNode() && "Can't legalize SELECT_CC with legal condition!");
3662 Tmp2 = DAG.getConstant(0, Tmp1.getValueType());
3663 CC = DAG.getCondCode(ISD::SETNE);
3664 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1, Tmp2,
3665 Tmp3, Tmp4, CC);
3666 Results.push_back(Tmp1);
3667 break;
3668 }
3669 case ISD::BR_CC: {
3670 Tmp1 = Node->getOperand(0); // Chain
3671 Tmp2 = Node->getOperand(2); // LHS
3672 Tmp3 = Node->getOperand(3); // RHS
3673 Tmp4 = Node->getOperand(1); // CC
3674
3675 LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp2.getValueType()),
3676 Tmp2, Tmp3, Tmp4, dl);
3677
3678 assert(!Tmp3.getNode() && "Can't legalize BR_CC with legal condition!");
3679 Tmp3 = DAG.getConstant(0, Tmp2.getValueType());
3680 Tmp4 = DAG.getCondCode(ISD::SETNE);
3681 Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4, Tmp2,
3682 Tmp3, Node->getOperand(4));
3683 Results.push_back(Tmp1);
3684 break;
3685 }
3686 case ISD::BUILD_VECTOR:
3687 Results.push_back(ExpandBUILD_VECTOR(Node));
3688 break;
3689 case ISD::SRA:
3690 case ISD::SRL:
3691 case ISD::SHL: {
3692 // Scalarize vector SRA/SRL/SHL.
3693 EVT VT = Node->getValueType(0);
3694 assert(VT.isVector() && "Unable to legalize non-vector shift");
3695 assert(TLI.isTypeLegal(VT.getScalarType())&& "Element type must be legal");
3696 unsigned NumElem = VT.getVectorNumElements();
3697
3698 SmallVector<SDValue, 8> Scalars;
3699 for (unsigned Idx = 0; Idx < NumElem; Idx++) {
3700 SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
3701 VT.getScalarType(),
3702 Node->getOperand(0), DAG.getIntPtrConstant(Idx));
3703 SDValue Sh = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
3704 VT.getScalarType(),
3705 Node->getOperand(1), DAG.getIntPtrConstant(Idx));
3706 Scalars.push_back(DAG.getNode(Node->getOpcode(), dl,
3707 VT.getScalarType(), Ex, Sh));
3708 }
3709 SDValue Result =
3710 DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0),
3711 &Scalars[0], Scalars.size());
3712 ReplaceNode(SDValue(Node, 0), Result);
3713 break;
3714 }
3715 case ISD::GLOBAL_OFFSET_TABLE:
3716 case ISD::GlobalAddress:
3717 case ISD::GlobalTLSAddress:
3718 case ISD::ExternalSymbol:
3719 case ISD::ConstantPool:
3720 case ISD::JumpTable:
3721 case ISD::INTRINSIC_W_CHAIN:
3722 case ISD::INTRINSIC_WO_CHAIN:
3723 case ISD::INTRINSIC_VOID:
3724 // FIXME: Custom lowering for these operations shouldn't return null!
3725 break;
3726 }
3727
3728 // Replace the original node with the legalized result.
3729 if (!Results.empty())
3730 ReplaceNode(Node, Results.data());
3731 }
3732
3733 void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
3734 SmallVector<SDValue, 8> Results;
3735 MVT OVT = Node->getSimpleValueType(0);
3736 if (Node->getOpcode() == ISD::UINT_TO_FP ||
3737 Node->getOpcode() == ISD::SINT_TO_FP ||
3738 Node->getOpcode() == ISD::SETCC) {
3739 OVT = Node->getOperand(0).getSimpleValueType();
3740 }
3741 MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
3742 DebugLoc dl = Node->getDebugLoc();
3743 SDValue Tmp1, Tmp2, Tmp3;
3744 switch (Node->getOpcode()) {
3745 case ISD::CTTZ:
3746 case ISD::CTTZ_ZERO_UNDEF:
3747 case ISD::CTLZ:
3748 case ISD::CTLZ_ZERO_UNDEF:
3749 case ISD::CTPOP:
3750 // Zero extend the argument.
3751 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
3752 // Perform the larger operation. For CTPOP and CTTZ_ZERO_UNDEF, this is
3753 // already the correct result.
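     // E.g. promoting cttz on i8 to i32: zero-extend, cttz in i32, and if the
     // i32 result is 32 (the input was zero) replace it with 8, the width of
     // the original type.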
3754 Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
3755 if (Node->getOpcode() == ISD::CTTZ) {
3756 // FIXME: This should set a bit in the zero extended value instead.
3757 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT),
3758 Tmp1, DAG.getConstant(NVT.getSizeInBits(), NVT),
3759 ISD::SETEQ);
3760 Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2,
3761 DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1);
3762 } else if (Node->getOpcode() == ISD::CTLZ ||
3763 Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF) {
3764 // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT))
3765 Tmp1 = DAG.getNode(ISD::SUB, dl, NVT, Tmp1,
3766 DAG.getConstant(NVT.getSizeInBits() -
3767 OVT.getSizeInBits(), NVT));
3768 }
3769 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1));
3770 break;
3771 case ISD::BSWAP: {
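     // E.g. bswap of an i16 promoted to i32: zero-extend to i32, bswap the
     // i32, then shift right by the 16 extra bits so the swapped bytes land
     // in the low half of the value.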
3772 unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits();
3773 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
3774 Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1);
3775 Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1,
3776 DAG.getConstant(DiffBits, TLI.getShiftAmountTy(NVT)));
3777 Results.push_back(Tmp1);
3778 break;
3779 }
3780 case ISD::FP_TO_UINT:
3781 case ISD::FP_TO_SINT:
3782 Tmp1 = PromoteLegalFP_TO_INT(Node->getOperand(0), Node->getValueType(0),
3783 Node->getOpcode() == ISD::FP_TO_SINT, dl);
3784 Results.push_back(Tmp1);
3785 break;
3786 case ISD::UINT_TO_FP:
3787 case ISD::SINT_TO_FP:
3788 Tmp1 = PromoteLegalINT_TO_FP(Node->getOperand(0), Node->getValueType(0),
3789 Node->getOpcode() == ISD::SINT_TO_FP, dl);
3790 Results.push_back(Tmp1);
3791 break;
3792 case ISD::VAARG: {
3793 SDValue Chain = Node->getOperand(0); // Get the chain.
3794 SDValue Ptr = Node->getOperand(1); // Get the pointer.
3795
3796 unsigned TruncOp;
3797 if (OVT.isVector()) {
3798 TruncOp = ISD::BITCAST;
3799 } else {
3800 assert(OVT.isInteger()
3801 && "VAARG promotion is supported only for vectors or integer types");
3802 TruncOp = ISD::TRUNCATE;
3803 }
3804
3805 // Perform the larger operation, then convert back
3806 Tmp1 = DAG.getVAArg(NVT, dl, Chain, Ptr, Node->getOperand(2),
3807 Node->getConstantOperandVal(3));
3808 Chain = Tmp1.getValue(1);
3809
3810 Tmp2 = DAG.getNode(TruncOp, dl, OVT, Tmp1);
3811
3812 // Modified the chain result - switch anything that used the old chain to
3813 // use the new one.
3814 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Tmp2);
3815 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Chain);
3816 ReplacedNode(Node);
3817 break;
3818 }
3819 case ISD::AND:
3820 case ISD::OR:
3821 case ISD::XOR: {
3822 unsigned ExtOp, TruncOp;
3823 if (OVT.isVector()) {
3824 ExtOp = ISD::BITCAST;
3825 TruncOp = ISD::BITCAST;
3826 } else {
3827 assert(OVT.isInteger() && "Cannot promote logic operation");
3828 ExtOp = ISD::ANY_EXTEND;
3829 TruncOp = ISD::TRUNCATE;
3830 }
3831 // Promote each of the values to the new type.
3832 Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
3833 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
3834 // Perform the larger operation, then convert back
3835 Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
3836 Results.push_back(DAG.getNode(TruncOp, dl, OVT, Tmp1));
3837 break;
3838 }
3839 case ISD::SELECT: {
3840 unsigned ExtOp, TruncOp;
3841 if (Node->getValueType(0).isVector()) {
3842 ExtOp = ISD::BITCAST;
3843 TruncOp = ISD::BITCAST;
3844 } else if (Node->getValueType(0).isInteger()) {
3845 ExtOp = ISD::ANY_EXTEND;
3846 TruncOp = ISD::TRUNCATE;
3847 } else {
3848 ExtOp = ISD::FP_EXTEND;
3849 TruncOp = ISD::FP_ROUND;
3850 }
3851 Tmp1 = Node->getOperand(0);
3852 // Promote each of the values to the new type.
3853 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
3854 Tmp3 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2));
3855 // Perform the larger operation, then round down.
3856 Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp1, Tmp2, Tmp3);
3857 if (TruncOp != ISD::FP_ROUND)
3858 Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1);
3859 else
3860 Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1,
3861 DAG.getIntPtrConstant(0));
3862 Results.push_back(Tmp1);
3863 break;
3864 }
3865 case ISD::VECTOR_SHUFFLE: {
3866 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Node)->getMask();
3867
3868 // Cast the two input vectors.
3869 Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0));
3870 Tmp2 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(1));
3871
3872 // Convert the shuffle mask to the right # elements.
3873 Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask);
3874 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OVT, Tmp1);
3875 Results.push_back(Tmp1);
3876 break;
3877 }
3878 case ISD::SETCC: {
3879 unsigned ExtOp = ISD::FP_EXTEND;
3880 if (NVT.isInteger()) {
3881 ISD::CondCode CCCode =
3882 cast<CondCodeSDNode>(Node->getOperand(2))->get();
3883 ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3884 }
3885 Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
3886 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
3887 Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0),
3888 Tmp1, Tmp2, Node->getOperand(2)));
3889 break;
3890 }
3891 case ISD::FDIV:
3892 case ISD::FREM:
3893 case ISD::FPOW: {
3894 Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
3895 Tmp2 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(1));
3896 Tmp3 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
3897 Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT,
3898 Tmp3, DAG.getIntPtrConstant(0)));
3899 break;
3900 }
3901 case ISD::FLOG2:
3902 case ISD::FEXP2:
3903 case ISD::FLOG:
3904 case ISD::FEXP: {
3905 Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
3906 Tmp2 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
3907 Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT,
3908 Tmp2, DAG.getIntPtrConstant(0)));
3909 break;
3910 }
3911 }
3912
3913 // Replace the original node with the legalized result.
3914 if (!Results.empty())
3915 ReplaceNode(Node, Results.data());
3916 }
3917
3918 // SelectionDAG::Legalize - This is the entry point for the file.
3919 //
3920 void SelectionDAG::Legalize() {
3921 /// This is the main entry point for DAG legalization.
3922 ///
3923 SelectionDAGLegalize(*this).LegalizeDAG();
3924 }
3925