1 //===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the interfaces that Hexagon uses to lower LLVM code
11 // into a selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "HexagonISelLowering.h"
16 #include "HexagonTargetMachine.h"
17 #include "HexagonMachineFunctionInfo.h"
18 #include "HexagonTargetObjectFile.h"
19 #include "HexagonSubtarget.h"
20 #include "llvm/DerivedTypes.h"
21 #include "llvm/Function.h"
22 #include "llvm/InlineAsm.h"
23 #include "llvm/GlobalVariable.h"
24 #include "llvm/GlobalAlias.h"
25 #include "llvm/Intrinsics.h"
26 #include "llvm/CallingConv.h"
27 #include "llvm/CodeGen/CallingConvLower.h"
28 #include "llvm/CodeGen/MachineFrameInfo.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstrBuilder.h"
31 #include "llvm/CodeGen/MachineJumpTableInfo.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/CodeGen/SelectionDAGISel.h"
34 #include "llvm/CodeGen/ValueTypes.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/raw_ostream.h"
39
40 using namespace llvm;
41
42 const unsigned Hexagon_MAX_RET_SIZE = 64;
43
44 static cl::opt<bool>
45 EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden,
46 cl::desc("Control jump table emission on Hexagon target"));
47
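// Number of named (non-variadic) parameters of the current callee. It is set
// in LowerCall from the callee's FunctionType and consulted by
// CC_Hexagon_VarArg to tell named arguments (which use the normal calling
// convention) from variadic ones (which always go on the stack).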
48 int NumNamedVarArgParams = -1;
49
50 // Implement calling convention for Hexagon.
51 static bool
52 CC_Hexagon(unsigned ValNo, MVT ValVT,
53 MVT LocVT, CCValAssign::LocInfo LocInfo,
54 ISD::ArgFlagsTy ArgFlags, CCState &State);
55
56 static bool
57 CC_Hexagon32(unsigned ValNo, MVT ValVT,
58 MVT LocVT, CCValAssign::LocInfo LocInfo,
59 ISD::ArgFlagsTy ArgFlags, CCState &State);
60
61 static bool
62 CC_Hexagon64(unsigned ValNo, MVT ValVT,
63 MVT LocVT, CCValAssign::LocInfo LocInfo,
64 ISD::ArgFlagsTy ArgFlags, CCState &State);
65
66 static bool
67 RetCC_Hexagon(unsigned ValNo, MVT ValVT,
68 MVT LocVT, CCValAssign::LocInfo LocInfo,
69 ISD::ArgFlagsTy ArgFlags, CCState &State);
70
71 static bool
72 RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
73 MVT LocVT, CCValAssign::LocInfo LocInfo,
74 ISD::ArgFlagsTy ArgFlags, CCState &State);
75
76 static bool
77 RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
78 MVT LocVT, CCValAssign::LocInfo LocInfo,
79 ISD::ArgFlagsTy ArgFlags, CCState &State);
80
81 static bool
82 CC_Hexagon_VarArg (unsigned ValNo, MVT ValVT,
83 MVT LocVT, CCValAssign::LocInfo LocInfo,
84 ISD::ArgFlagsTy ArgFlags, CCState &State) {
85
86 // NumNamedVarArgParams cannot be zero for a VarArg function.
87 assert ( (NumNamedVarArgParams > 0) &&
88 "NumNamedVarArgParams is not bigger than zero.");
89
90 if ( (int)ValNo < NumNamedVarArgParams ) {
91 // Deal with named arguments.
92 return CC_Hexagon(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State);
93 }
94
95 // Deal with un-named arguments.
96 unsigned ofst;
97 if (ArgFlags.isByVal()) {
98 // If pass-by-value, the size allocated on stack is decided
99 // by ArgFlags.getByValSize(), not by the size of LocVT.
100 assert ((ArgFlags.getByValSize() > 8) &&
101 "ByValSize must be bigger than 8 bytes");
102 ofst = State.AllocateStack(ArgFlags.getByValSize(), 4);
103 State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
104 return false;
105 }
106 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
107 ofst = State.AllocateStack(4, 4);
108 State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
109 return false;
110 }
111 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
112 ofst = State.AllocateStack(8, 8);
113 State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
114 return false;
115 }
116 llvm_unreachable(0);
117 }
118
119
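// Top-level argument convention: byval aggregates are always placed on the
// stack; i1/i8/i16 values are promoted to i32 first; everything else is
// dispatched to CC_Hexagon32 or CC_Hexagon64 based on the location type.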
120 static bool
121 CC_Hexagon (unsigned ValNo, MVT ValVT,
122 MVT LocVT, CCValAssign::LocInfo LocInfo,
123 ISD::ArgFlagsTy ArgFlags, CCState &State) {
124
125 if (ArgFlags.isByVal()) {
126 // Passed on stack.
127 assert ((ArgFlags.getByValSize() > 8) &&
128 "ByValSize must be bigger than 8 bytes");
129 unsigned Offset = State.AllocateStack(ArgFlags.getByValSize(), 4);
130 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
131 return false;
132 }
133
134 if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
135 LocVT = MVT::i32;
136 ValVT = MVT::i32;
137 if (ArgFlags.isSExt())
138 LocInfo = CCValAssign::SExt;
139 else if (ArgFlags.isZExt())
140 LocInfo = CCValAssign::ZExt;
141 else
142 LocInfo = CCValAssign::AExt;
143 }
144
145 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
146 if (!CC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
147 return false;
148 }
149
150 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
151 if (!CC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
152 return false;
153 }
154
155 return true; // CC didn't match.
156 }
157
158
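// 32-bit values are passed in R0..R5; once those are used up, each further
// value gets a 4-byte, 4-byte-aligned stack slot.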
159 static bool CC_Hexagon32(unsigned ValNo, MVT ValVT,
160 MVT LocVT, CCValAssign::LocInfo LocInfo,
161 ISD::ArgFlagsTy ArgFlags, CCState &State) {
162
163 static const uint16_t RegList[] = {
164 Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
165 Hexagon::R5
166 };
167 if (unsigned Reg = State.AllocateReg(RegList, 6)) {
168 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
169 return false;
170 }
171
172 unsigned Offset = State.AllocateStack(4, 4);
173 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
174 return false;
175 }
176
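// 64-bit values are passed in the register pairs D0 (R1:0), D1 (R3:2) and
// D2 (R5:4); after that they go into 8-byte, 8-byte-aligned stack slots.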
177 static bool CC_Hexagon64(unsigned ValNo, MVT ValVT,
178 MVT LocVT, CCValAssign::LocInfo LocInfo,
179 ISD::ArgFlagsTy ArgFlags, CCState &State) {
180
181 if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
182 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
183 return false;
184 }
185
186 static const uint16_t RegList1[] = {
187 Hexagon::D1, Hexagon::D2
188 };
189 static const uint16_t RegList2[] = {
190 Hexagon::R1, Hexagon::R3
191 };
192 if (unsigned Reg = State.AllocateReg(RegList1, RegList2, 2)) {
193 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
194 return false;
195 }
196
197 unsigned Offset = State.AllocateStack(8, 8, Hexagon::D2);
198 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
199 return false;
200 }
201
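// Return values: i1/i8/i16 are promoted to i32 and returned (like i32/f32)
// in R0; i64/f64 values come back in the D0 (R1:0) register pair.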
202 static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT,
203 MVT LocVT, CCValAssign::LocInfo LocInfo,
204 ISD::ArgFlagsTy ArgFlags, CCState &State) {
205
206
207 if (LocVT == MVT::i1 ||
208 LocVT == MVT::i8 ||
209 LocVT == MVT::i16) {
210 LocVT = MVT::i32;
211 ValVT = MVT::i32;
212 if (ArgFlags.isSExt())
213 LocInfo = CCValAssign::SExt;
214 else if (ArgFlags.isZExt())
215 LocInfo = CCValAssign::ZExt;
216 else
217 LocInfo = CCValAssign::AExt;
218 }
219
220 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
221 if (!RetCC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
222 return false;
223 }
224
225 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
226 if (!RetCC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
227 return false;
228 }
229
230 return true; // CC didn't match.
231 }
232
233 static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
234 MVT LocVT, CCValAssign::LocInfo LocInfo,
235 ISD::ArgFlagsTy ArgFlags, CCState &State) {
236
237 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
238 if (unsigned Reg = State.AllocateReg(Hexagon::R0)) {
239 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
240 return false;
241 }
242 }
243
244 unsigned Offset = State.AllocateStack(4, 4);
245 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
246 return false;
247 }
248
249 static bool RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
250 MVT LocVT, CCValAssign::LocInfo LocInfo,
251 ISD::ArgFlagsTy ArgFlags, CCState &State) {
252 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
253 if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
254 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
255 return false;
256 }
257 }
258
259 unsigned Offset = State.AllocateStack(8, 8);
260 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
261 return false;
262 }
263
264 SDValue
265 HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
266 const {
267 return SDValue();
268 }
269
270 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
271 /// by "Src" to address "Dst" of size "Size". Alignment information is
272 /// specified by the specific parameter attribute. The copy will be passed as
273 /// a byval function parameter. Sometimes what we are copying is the end of a
274 /// larger object, the part that does not fit in registers.
275 static SDValue
276 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
277 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
278 DebugLoc dl) {
279
280 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
281 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
282 /*isVolatile=*/false, /*AlwaysInline=*/false,
283 MachinePointerInfo(), MachinePointerInfo());
284 }
285
286
287 // LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
288 // passed by value, the function prototype is modified to return void and
289 // the value is stored in memory pointed to by a pointer passed by the caller.
290 SDValue
291 HexagonTargetLowering::LowerReturn(SDValue Chain,
292 CallingConv::ID CallConv, bool isVarArg,
293 const SmallVectorImpl<ISD::OutputArg> &Outs,
294 const SmallVectorImpl<SDValue> &OutVals,
295 DebugLoc dl, SelectionDAG &DAG) const {
296
297 // CCValAssign - represent the assignment of the return value to locations.
298 SmallVector<CCValAssign, 16> RVLocs;
299
300 // CCState - Info about the registers and stack slot.
301 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
302 getTargetMachine(), RVLocs, *DAG.getContext());
303
304 // Analyze return values of ISD::RET
305 CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);
306
307 // If this is the first return lowered for this function, add the regs to the
308 // liveout set for the function.
309 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
310 for (unsigned i = 0; i != RVLocs.size(); ++i)
311 if (RVLocs[i].isRegLoc())
312 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
313 }
314
315 SDValue Flag;
316 // Copy the result values into the output registers.
317 for (unsigned i = 0; i != RVLocs.size(); ++i) {
318 CCValAssign &VA = RVLocs[i];
319
320 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
321
322 // Guarantee that all emitted copies are stuck together with flags.
323 Flag = Chain.getValue(1);
324 }
325
326 if (Flag.getNode())
327 return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
328
329 return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, Chain);
330 }
331
332
333
334
335 /// LowerCallResult - Lower the result values of an ISD::CALL into the
336 /// appropriate copies out of appropriate physical registers. This assumes that
337 /// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
338 /// being lowered. Returns a SDNode with the same number of values as the
339 /// ISD::CALL.
340 SDValue
341 HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
342 CallingConv::ID CallConv, bool isVarArg,
343 const
344 SmallVectorImpl<ISD::InputArg> &Ins,
345 DebugLoc dl, SelectionDAG &DAG,
346 SmallVectorImpl<SDValue> &InVals,
347 const SmallVectorImpl<SDValue> &OutVals,
348 SDValue Callee) const {
349
350 // Assign locations to each value returned by this call.
351 SmallVector<CCValAssign, 16> RVLocs;
352
353 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
354 getTargetMachine(), RVLocs, *DAG.getContext());
355
356 CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);
357
358 // Copy all of the result registers out of their specified physreg.
359 for (unsigned i = 0; i != RVLocs.size(); ++i) {
360 Chain = DAG.getCopyFromReg(Chain, dl,
361 RVLocs[i].getLocReg(),
362 RVLocs[i].getValVT(), InFlag).getValue(1);
363 InFlag = Chain.getValue(2);
364 InVals.push_back(Chain.getValue(0));
365 }
366
367 return Chain;
368 }
369
370 /// LowerCall - Functions arguments are copied from virtual regs to
371 /// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
372 SDValue
373 HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
374 SmallVectorImpl<SDValue> &InVals) const {
375 SelectionDAG &DAG = CLI.DAG;
376 DebugLoc &dl = CLI.DL;
377 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
378 SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
379 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
380 SDValue Chain = CLI.Chain;
381 SDValue Callee = CLI.Callee;
382 bool &isTailCall = CLI.IsTailCall;
383 CallingConv::ID CallConv = CLI.CallConv;
384 bool isVarArg = CLI.IsVarArg;
385
386 bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
387
388 // Analyze operands of the call, assigning locations to each operand.
389 SmallVector<CCValAssign, 16> ArgLocs;
390 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
391 getTargetMachine(), ArgLocs, *DAG.getContext());
392
393 // Check for varargs.
394 NumNamedVarArgParams = -1;
395 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee))
396 {
397 const Function* CalleeFn = NULL;
398 Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, MVT::i32);
399 if ((CalleeFn = dyn_cast<Function>(GA->getGlobal())))
400 {
401 // If a function has zero args and is a vararg function, that's
402 // disallowed so it must be an undeclared function. Do not assume
403 // varargs if the callee is undefined.
404 if (CalleeFn->isVarArg() &&
405 CalleeFn->getFunctionType()->getNumParams() != 0) {
406 NumNamedVarArgParams = CalleeFn->getFunctionType()->getNumParams();
407 }
408 }
409 }
410
411 if (NumNamedVarArgParams > 0)
412 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_VarArg);
413 else
414 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
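  // Illustrative example (not from the source): for a call such as
  // printf("%d\n", X), NumNamedVarArgParams is 1, so the format string is
  // assigned by CC_Hexagon (register R0) while X falls through to the
  // unnamed-argument path of CC_Hexagon_VarArg and is placed on the stack.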
415
416
417 if(isTailCall) {
418 bool StructAttrFlag =
419 DAG.getMachineFunction().getFunction()->hasStructRetAttr();
420 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
421 isVarArg, IsStructRet,
422 StructAttrFlag,
423 Outs, OutVals, Ins, DAG);
424 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i){
425 CCValAssign &VA = ArgLocs[i];
426 if (VA.isMemLoc()) {
427 isTailCall = false;
428 break;
429 }
430 }
431 if (isTailCall) {
432 DEBUG(dbgs () << "Eligible for Tail Call\n");
433 } else {
434 DEBUG(dbgs () <<
435 "Argument must be passed on stack. Not eligible for Tail Call\n");
436 }
437 }
438 // Get a count of how many bytes are to be pushed on the stack.
439 unsigned NumBytes = CCInfo.getNextStackOffset();
440 SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
441 SmallVector<SDValue, 8> MemOpChains;
442
443 SDValue StackPtr =
444 DAG.getCopyFromReg(Chain, dl, TM.getRegisterInfo()->getStackRegister(),
445 getPointerTy());
446
447 // Walk the register/memloc assignments, inserting copies/loads.
448 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
449 CCValAssign &VA = ArgLocs[i];
450 SDValue Arg = OutVals[i];
451 ISD::ArgFlagsTy Flags = Outs[i].Flags;
452
453 // Promote the value if needed.
454 switch (VA.getLocInfo()) {
455 default:
456 // Loc info must be one of Full, SExt, ZExt, or AExt.
457 llvm_unreachable("Unknown loc info!");
458 case CCValAssign::Full:
459 break;
460 case CCValAssign::SExt:
461 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
462 break;
463 case CCValAssign::ZExt:
464 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
465 break;
466 case CCValAssign::AExt:
467 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
468 break;
469 }
470
471 if (VA.isMemLoc()) {
472 unsigned LocMemOffset = VA.getLocMemOffset();
473 SDValue PtrOff = DAG.getConstant(LocMemOffset, StackPtr.getValueType());
474 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
475
476 if (Flags.isByVal()) {
477 // The argument is a struct passed by value. According to LLVM, "Arg"
478 // is a pointer.
479 MemOpChains.push_back(CreateCopyOfByValArgument(Arg, PtrOff, Chain,
480 Flags, DAG, dl));
481 } else {
482 // The argument is not passed by value. "Arg" is a built-in type. It is
483 // not a pointer.
484 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
485 MachinePointerInfo(),false, false,
486 0));
487 }
488 continue;
489 }
490
491 // Arguments that are passed in a register must be added to the RegsToPass
492 // vector.
493 if (VA.isRegLoc()) {
494 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
495 }
496 }
497
498 // Transform all store nodes into one single node because all store
499 // nodes are independent of each other.
500 if (!MemOpChains.empty()) {
501 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOpChains[0],
502 MemOpChains.size());
503 }
504
505 if (!isTailCall)
506 Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
507 getPointerTy(), true));
508
509 // Build a sequence of copy-to-reg nodes chained together with token
510 // chain and flag operands which copy the outgoing args into registers.
511 // The InFlag is necessary since all emitted instructions must be
512 // stuck together.
513 SDValue InFlag;
514 if (!isTailCall) {
515 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
516 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
517 RegsToPass[i].second, InFlag);
518 InFlag = Chain.getValue(1);
519 }
520 }
521
522 // For tail calls lower the arguments to the 'real' stack slot.
523 if (isTailCall) {
524 // Force all the incoming stack arguments to be loaded from the stack
525 // before any new outgoing arguments are stored to the stack, because the
526 // outgoing stack slots may alias the incoming argument stack slots, and
527 // the alias isn't otherwise explicit. This is slightly more conservative
528 // than necessary, because it means that each store effectively depends
529 // on every argument instead of just those arguments it would clobber.
530 //
531 // Do not flag preceding copytoreg stuff together with the following stuff.
532 InFlag = SDValue();
533 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
534 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
535 RegsToPass[i].second, InFlag);
536 InFlag = Chain.getValue(1);
537 }
538 InFlag =SDValue();
539 }
540
541 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
542 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
543 // node so that legalize doesn't hack it.
544 if (flag_aligned_memcpy) {
545 const char *MemcpyName =
546 "__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes";
547 Callee =
548 DAG.getTargetExternalSymbol(MemcpyName, getPointerTy());
549 flag_aligned_memcpy = false;
550 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
551 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy());
552 } else if (ExternalSymbolSDNode *S =
553 dyn_cast<ExternalSymbolSDNode>(Callee)) {
554 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
555 }
556
557 // Returns a chain & a flag for retval copy to use.
558 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
559 SmallVector<SDValue, 8> Ops;
560 Ops.push_back(Chain);
561 Ops.push_back(Callee);
562
563 // Add argument registers to the end of the list so that they are
564 // known live into the call.
565 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
566 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
567 RegsToPass[i].second.getValueType()));
568 }
569
570 if (InFlag.getNode()) {
571 Ops.push_back(InFlag);
572 }
573
574 if (isTailCall)
575 return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
576
577 Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
578 InFlag = Chain.getValue(1);
579
580 // Create the CALLSEQ_END node.
581 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
582 DAG.getIntPtrConstant(0, true), InFlag);
583 InFlag = Chain.getValue(1);
584
585 // Handle result values, copying them out of physregs into vregs that we
586 // return.
587 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
588 InVals, OutVals, Callee);
589 }
590
591 static bool getIndexedAddressParts(SDNode *Ptr, EVT VT,
592 bool isSEXTLoad, SDValue &Base,
593 SDValue &Offset, bool &isInc,
594 SelectionDAG &DAG) {
595 if (Ptr->getOpcode() != ISD::ADD)
596 return false;
597
598 if (VT == MVT::i64 || VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
599 isInc = (Ptr->getOpcode() == ISD::ADD);
600 Base = Ptr->getOperand(0);
601 Offset = Ptr->getOperand(1);
602 // Ensure that Offset is a constant.
603 return (isa<ConstantSDNode>(Offset));
604 }
605
606 return false;
607 }
608
609 // TODO: Put this function along with the other isS* functions in
610 // HexagonISelDAGToDAG.cpp into a common file. Or better still, use the
611 // functions defined in HexagonImmediates.td.
612 static bool Is_PostInc_S4_Offset(SDNode * S, int ShiftAmount) {
613 ConstantSDNode *N = cast<ConstantSDNode>(S);
614
615 // immS4 predicate - True if the immediate fits in a 4-bit sign-extended
616 // field.
617 int64_t v = (int64_t)N->getSExtValue();
618 int64_t m = 0;
619 if (ShiftAmount > 0) {
620 m = v % ShiftAmount;
621 v = v >> ShiftAmount;
622 }
623 return (v <= 7) && (v >= -8) && (m == 0);
624 }
625
626 /// getPostIndexedAddressParts - returns true by value, base pointer and
627 /// offset pointer and addressing mode by reference if this node can be
628 /// combined with a load / store to form a post-indexed load / store.
629 bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
630 SDValue &Base,
631 SDValue &Offset,
632 ISD::MemIndexedMode &AM,
633 SelectionDAG &DAG) const
634 {
635 EVT VT;
636 SDValue Ptr;
637 bool isSEXTLoad = false;
638
639 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
640 VT = LD->getMemoryVT();
641 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
642 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
643 VT = ST->getMemoryVT();
644 if (ST->getValue().getValueType() == MVT::i64 && ST->isTruncatingStore()) {
645 return false;
646 }
647 } else {
648 return false;
649 }
650
651 bool isInc = false;
652 bool isLegal = getIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
653 isInc, DAG);
654 // ShiftAmount = number of left-shifted bits in the Hexagon instruction.
655 int ShiftAmount = VT.getSizeInBits() / 16;
656 if (isLegal && Is_PostInc_S4_Offset(Offset.getNode(), ShiftAmount)) {
657 AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
658 return true;
659 }
660
661 return false;
662 }
663
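// Scan an INLINEASM node's operand list for an early-clobber definition of
// the return-address register (LR); if one is found, record it via
// setHasClobberLR() on HexagonMachineFunctionInfo so the rest of the backend
// treats LR as clobbered by the inline assembly.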
664 SDValue HexagonTargetLowering::LowerINLINEASM(SDValue Op,
665 SelectionDAG &DAG) const {
666 SDNode *Node = Op.getNode();
667 MachineFunction &MF = DAG.getMachineFunction();
668 HexagonMachineFunctionInfo *FuncInfo =
669 MF.getInfo<HexagonMachineFunctionInfo>();
670 switch (Node->getOpcode()) {
671 case ISD::INLINEASM: {
672 unsigned NumOps = Node->getNumOperands();
673 if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
674 --NumOps; // Ignore the flag operand.
675
676 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
677 if (FuncInfo->hasClobberLR())
678 break;
679 unsigned Flags =
680 cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
681 unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
682 ++i; // Skip the ID value.
683
684 switch (InlineAsm::getKind(Flags)) {
685 default: llvm_unreachable("Bad flags!");
686 case InlineAsm::Kind_RegDef:
687 case InlineAsm::Kind_RegUse:
688 case InlineAsm::Kind_Imm:
689 case InlineAsm::Kind_Clobber:
690 case InlineAsm::Kind_Mem: {
691 for (; NumVals; --NumVals, ++i) {}
692 break;
693 }
694 case InlineAsm::Kind_RegDefEarlyClobber: {
695 for (; NumVals; --NumVals, ++i) {
696 unsigned Reg =
697 cast<RegisterSDNode>(Node->getOperand(i))->getReg();
698
699 // Check whether it is the link register (LR).
700 if (Reg == TM.getRegisterInfo()->getRARegister()) {
701 FuncInfo->setHasClobberLR(true);
702 break;
703 }
704 }
705 break;
706 }
707 }
708 }
709 }
710 } // Node->getOpcode
711 return Op;
712 }
713
714
715 //
716 // Taken from the XCore backend.
717 //
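// The jump is lowered as: address = jump-table base + (index << 2); an i32
// load from that address fetches the destination, and a HexagonISD::BR_JT
// node branches to it. Every target block is also marked as having its
// address taken.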
718 SDValue HexagonTargetLowering::
719 LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
720 {
721 SDValue Chain = Op.getOperand(0);
722 SDValue Table = Op.getOperand(1);
723 SDValue Index = Op.getOperand(2);
724 DebugLoc dl = Op.getDebugLoc();
725 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
726 unsigned JTI = JT->getIndex();
727 MachineFunction &MF = DAG.getMachineFunction();
728 const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
729 SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
730
731 // Mark all jump table targets as address taken.
732 const std::vector<MachineJumpTableEntry> &JTE = MJTI->getJumpTables();
733 const std::vector<MachineBasicBlock*> &JTBBs = JTE[JTI].MBBs;
734 for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
735 MachineBasicBlock *MBB = JTBBs[i];
736 MBB->setHasAddressTaken();
737 // This line is needed to set the hasAddressTaken flag on the BasicBlock
738 // object.
739 BlockAddress::get(const_cast<BasicBlock *>(MBB->getBasicBlock()));
740 }
741
742 SDValue JumpTableBase = DAG.getNode(HexagonISD::WrapperJT, dl,
743 getPointerTy(), TargetJT);
744 SDValue ShiftIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
745 DAG.getConstant(2, MVT::i32));
746 SDValue JTAddress = DAG.getNode(ISD::ADD, dl, MVT::i32, JumpTableBase,
747 ShiftIndex);
748 SDValue LoadTarget = DAG.getLoad(MVT::i32, dl, Chain, JTAddress,
749 MachinePointerInfo(), false, false, false,
750 0);
751 return DAG.getNode(HexagonISD::BR_JT, dl, MVT::Other, Chain, LoadTarget);
752 }
753
754
755 SDValue
756 HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
757 SelectionDAG &DAG) const {
758 SDValue Chain = Op.getOperand(0);
759 SDValue Size = Op.getOperand(1);
760 DebugLoc dl = Op.getDebugLoc();
761
762 unsigned SPReg = getStackPointerRegisterToSaveRestore();
763
764 // Get a reference to the stack pointer.
765 SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32);
766
767 // Subtract the dynamic size from the actual stack size to
768 // obtain the new stack size.
769 SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size);
770
771 //
772 // For Hexagon, the outgoing memory arguments area should be on top of the
773 // alloca area on the stack i.e., the outgoing memory arguments should be
774 // at a lower address than the alloca area. Move the alloca area down the
775 // stack by adding back the space reserved for outgoing arguments to SP
776 // here.
777 //
778 // We do not know what the size of the outgoing args is at this point.
779 // So, we add a pseudo instruction ADJDYNALLOC that will adjust the
780 // stack pointer. We patch this instruction with the correct, known
781 // offset in emitPrologue().
782 //
783 // Use a placeholder immediate (zero) for now. This will be patched up
784 // by emitPrologue().
785 SDValue ArgAdjust = DAG.getNode(HexagonISD::ADJDYNALLOC, dl,
786 MVT::i32,
787 Sub,
788 DAG.getConstant(0, MVT::i32));
789
790 // The Sub result contains the new stack start address, so it
791 // must be placed in the stack pointer register.
792 SDValue CopyChain = DAG.getCopyToReg(Chain, dl,
793 TM.getRegisterInfo()->getStackRegister(),
794 Sub);
795
796 SDValue Ops[2] = { ArgAdjust, CopyChain };
797 return DAG.getMergeValues(Ops, 2, dl);
798 }
799
800 SDValue
801 HexagonTargetLowering::LowerFormalArguments(SDValue Chain,
802 CallingConv::ID CallConv,
803 bool isVarArg,
804 const
805 SmallVectorImpl<ISD::InputArg> &Ins,
806 DebugLoc dl, SelectionDAG &DAG,
807 SmallVectorImpl<SDValue> &InVals)
808 const {
809
810 MachineFunction &MF = DAG.getMachineFunction();
811 MachineFrameInfo *MFI = MF.getFrameInfo();
812 MachineRegisterInfo &RegInfo = MF.getRegInfo();
813 HexagonMachineFunctionInfo *FuncInfo =
814 MF.getInfo<HexagonMachineFunctionInfo>();
815
816
817 // Assign locations to all of the incoming arguments.
818 SmallVector<CCValAssign, 16> ArgLocs;
819 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
820 getTargetMachine(), ArgLocs, *DAG.getContext());
821
822 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);
823
824 // For LLVM, in the case when returning a struct by value (>8 bytes),
825 // the first argument is a pointer that points to the location on the
826 // caller's stack where the return value will be stored. For Hexagon, the
827 // location on the caller's stack is passed only when the struct size is
828 // smaller than (or equal to) 8 bytes. If not, no address is passed into the
829 // callee and the callee returns the result directly through R0/R1.
830
831 SmallVector<SDValue, 4> MemOps;
832
833 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
834 CCValAssign &VA = ArgLocs[i];
835 ISD::ArgFlagsTy Flags = Ins[i].Flags;
836 unsigned ObjSize;
837 unsigned StackLocation;
838 int FI;
839
840 if ( (VA.isRegLoc() && !Flags.isByVal())
841 || (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() > 8)) {
842 // Arguments passed in registers
843 // 1. int, long long, ptr args that get allocated in registers.
844 // 2. Large structs that get a register to hold their address.
845 EVT RegVT = VA.getLocVT();
846 if (RegVT == MVT::i8 || RegVT == MVT::i16 ||
847 RegVT == MVT::i32 || RegVT == MVT::f32) {
848 unsigned VReg =
849 RegInfo.createVirtualRegister(&Hexagon::IntRegsRegClass);
850 RegInfo.addLiveIn(VA.getLocReg(), VReg);
851 InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
852 } else if (RegVT == MVT::i64) {
853 unsigned VReg =
854 RegInfo.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
855 RegInfo.addLiveIn(VA.getLocReg(), VReg);
856 InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
857 } else {
858 assert (0);
859 }
860 } else if (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() <= 8) {
861 assert (0 && "ByValSize must be bigger than 8 bytes");
862 } else {
863 // Sanity check.
864 assert(VA.isMemLoc());
865
866 if (Flags.isByVal()) {
867 // If it's a byval parameter, then we need to compute the
868 // "real" size, not the size of the pointer.
869 ObjSize = Flags.getByValSize();
870 } else {
871 ObjSize = VA.getLocVT().getStoreSizeInBits() >> 3;
872 }
873
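      // Incoming stack arguments live above the saved LR/FP pair, so the
      // fixed-object offset is biased by HEXAGON_LRFP_SIZE (assumed here to
      // be the size of that save area).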
874 StackLocation = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
875 // Create the frame index object for this incoming parameter...
876 FI = MFI->CreateFixedObject(ObjSize, StackLocation, true);
877
878 // Create the SelectionDAG nodes corresponding to a load
879 // from this parameter.
880 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
881
882 if (Flags.isByVal()) {
883 // If it's a pass-by-value aggregate, then do not dereference the stack
884 // location. Instead, we should generate a reference to the stack
885 // location.
886 InVals.push_back(FIN);
887 } else {
888 InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
889 MachinePointerInfo(), false, false,
890 false, 0));
891 }
892 }
893 }
894
895 if (!MemOps.empty())
896 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOps[0],
897 MemOps.size());
898
899 if (isVarArg) {
900 // This will point to the next argument passed via stack.
901 int FrameIndex = MFI->CreateFixedObject(Hexagon_PointerSize,
902 HEXAGON_LRFP_SIZE +
903 CCInfo.getNextStackOffset(),
904 true);
905 FuncInfo->setVarArgsFrameIndex(FrameIndex);
906 }
907
908 return Chain;
909 }
910
911 SDValue
912 HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
913 // VASTART stores the address of the VarArgsFrameIndex slot into the
914 // memory location argument.
915 MachineFunction &MF = DAG.getMachineFunction();
916 HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
917 SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
918 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
919 return DAG.getStore(Op.getOperand(0), Op.getDebugLoc(), Addr,
920 Op.getOperand(1), MachinePointerInfo(SV), false,
921 false, 0);
922 }
923
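// SELECT_CC is split into an i1 SETCC that computes the condition and a
// plain SELECT between the two values; this matches the Custom action
// registered for ISD::SELECT_CC in the constructor.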
924 SDValue
925 HexagonTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
926 SDValue LHS = Op.getOperand(0);
927 SDValue RHS = Op.getOperand(1);
928 SDValue CC = Op.getOperand(4);
929 SDValue TrueVal = Op.getOperand(2);
930 SDValue FalseVal = Op.getOperand(3);
931 DebugLoc dl = Op.getDebugLoc();
932 SDNode* OpNode = Op.getNode();
933 EVT SVT = OpNode->getValueType(0);
934
935 SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i1, LHS, RHS, CC);
936 return DAG.getNode(ISD::SELECT, dl, SVT, Cond, TrueVal, FalseVal);
937 }
938
939 SDValue
940 HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
941 EVT ValTy = Op.getValueType();
942
943 DebugLoc dl = Op.getDebugLoc();
944 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
945 SDValue Res;
946 if (CP->isMachineConstantPoolEntry())
947 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), ValTy,
948 CP->getAlignment());
949 else
950 Res = DAG.getTargetConstantPool(CP->getConstVal(), ValTy,
951 CP->getAlignment());
952 return DAG.getNode(HexagonISD::CONST32, dl, ValTy, Res);
953 }
954
955 SDValue
956 HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
957 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
958 MachineFunction &MF = DAG.getMachineFunction();
959 MachineFrameInfo *MFI = MF.getFrameInfo();
960 MFI->setReturnAddressIsTaken(true);
961
962 EVT VT = Op.getValueType();
963 DebugLoc dl = Op.getDebugLoc();
964 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
965 if (Depth) {
966 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
967 SDValue Offset = DAG.getConstant(4, MVT::i32);
968 return DAG.getLoad(VT, dl, DAG.getEntryNode(),
969 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
970 MachinePointerInfo(), false, false, false, 0);
971 }
972
973 // Return LR, which contains the return address. Mark it an implicit live-in.
974 unsigned Reg = MF.addLiveIn(TRI->getRARegister(), getRegClassFor(MVT::i32));
975 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
976 }
977
978 SDValue
979 HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
980 const HexagonRegisterInfo *TRI = TM.getRegisterInfo();
981 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
982 MFI->setFrameAddressIsTaken(true);
983
984 EVT VT = Op.getValueType();
985 DebugLoc dl = Op.getDebugLoc();
986 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
987 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
988 TRI->getFrameRegister(), VT);
989 while (Depth--)
990 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
991 MachinePointerInfo(),
992 false, false, false, 0);
993 return FrameAddr;
994 }
995
996
997 SDValue HexagonTargetLowering::LowerMEMBARRIER(SDValue Op,
998 SelectionDAG& DAG) const {
999 DebugLoc dl = Op.getDebugLoc();
1000 return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
1001 }
1002
1003
1004 SDValue HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op,
1005 SelectionDAG& DAG) const {
1006 DebugLoc dl = Op.getDebugLoc();
1007 return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
1008 }
1009
1010
1011 SDValue HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op,
1012 SelectionDAG &DAG) const {
1013 SDValue Result;
1014 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
1015 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
1016 DebugLoc dl = Op.getDebugLoc();
1017 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
1018
1019 HexagonTargetObjectFile &TLOF =
1020 (HexagonTargetObjectFile&)getObjFileLowering();
1021 if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
1022 return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), Result);
1023 }
1024
1025 return DAG.getNode(HexagonISD::CONST32, dl, getPointerTy(), Result);
1026 }
1027
1028 //===----------------------------------------------------------------------===//
1029 // TargetLowering Implementation
1030 //===----------------------------------------------------------------------===//
1031
1032 HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
1033 &targetmachine)
1034 : TargetLowering(targetmachine, new HexagonTargetObjectFile()),
1035 TM(targetmachine) {
1036
1037 const HexagonRegisterInfo* QRI = TM.getRegisterInfo();
1038
1039 // Set up the register classes.
1040 addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
1041 addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
1042
1043 if (QRI->Subtarget.hasV5TOps()) {
1044 addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
1045 addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
1046 }
1047
1048 addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
1049
1050 computeRegisterProperties();
1051
1052 // Align loop entry
1053 setPrefLoopAlignment(4);
1054
1055 // Limits for inline expansion of memcpy/memmove
1056 maxStoresPerMemcpy = 6;
1057 maxStoresPerMemmove = 6;
1058
1059 //
1060 // Library calls for unsupported operations
1061 //
1062
1063 setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
1064 setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
1065
1066 setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
1067 setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
1068
1069 setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
1070 setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
1071
1072 setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
1073 setOperationAction(ISD::SDIV, MVT::i32, Expand);
1074 setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
1075 setOperationAction(ISD::SREM, MVT::i32, Expand);
1076
1077 setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
1078 setOperationAction(ISD::SDIV, MVT::i64, Expand);
1079 setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
1080 setOperationAction(ISD::SREM, MVT::i64, Expand);
1081
1082 setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
1083 setOperationAction(ISD::UDIV, MVT::i32, Expand);
1084
1085 setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
1086 setOperationAction(ISD::UDIV, MVT::i64, Expand);
1087
1088 setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
1089 setOperationAction(ISD::UREM, MVT::i32, Expand);
1090
1091 setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
1092 setOperationAction(ISD::UREM, MVT::i64, Expand);
1093
1094 setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
1095 setOperationAction(ISD::FDIV, MVT::f32, Expand);
1096
1097 setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
1098 setOperationAction(ISD::FDIV, MVT::f64, Expand);
1099
1100 setOperationAction(ISD::FSQRT, MVT::f32, Expand);
1101 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
1102 setOperationAction(ISD::FSIN, MVT::f32, Expand);
1103 setOperationAction(ISD::FSIN, MVT::f64, Expand);
1104
1105 if (QRI->Subtarget.hasV5TOps()) {
1106 // Hexagon V5 Support.
1107 setOperationAction(ISD::FADD, MVT::f32, Legal);
1108 setOperationAction(ISD::FADD, MVT::f64, Legal);
1109 setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal);
1110 setCondCodeAction(ISD::SETOEQ, MVT::f32, Legal);
1111 setCondCodeAction(ISD::SETOEQ, MVT::f64, Legal);
1112 setCondCodeAction(ISD::SETUEQ, MVT::f32, Legal);
1113 setCondCodeAction(ISD::SETUEQ, MVT::f64, Legal);
1114
1115 setCondCodeAction(ISD::SETOGE, MVT::f32, Legal);
1116 setCondCodeAction(ISD::SETOGE, MVT::f64, Legal);
1117 setCondCodeAction(ISD::SETUGE, MVT::f32, Legal);
1118 setCondCodeAction(ISD::SETUGE, MVT::f64, Legal);
1119
1120 setCondCodeAction(ISD::SETOGT, MVT::f32, Legal);
1121 setCondCodeAction(ISD::SETOGT, MVT::f64, Legal);
1122 setCondCodeAction(ISD::SETUGT, MVT::f32, Legal);
1123 setCondCodeAction(ISD::SETUGT, MVT::f64, Legal);
1124
1125 setCondCodeAction(ISD::SETOLE, MVT::f32, Legal);
1126 setCondCodeAction(ISD::SETOLE, MVT::f64, Legal);
1127 setCondCodeAction(ISD::SETOLT, MVT::f32, Legal);
1128 setCondCodeAction(ISD::SETOLT, MVT::f64, Legal);
1129
1130 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
1131 setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
1132
1133 setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
1134 setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
1135 setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
1136 setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
1137
1138 setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
1139 setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
1140 setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
1141 setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
1142
1143 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
1144 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
1145 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
1146 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
1147
1148 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1149 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1150 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1151 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1152
1153 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1154 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1155 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1156 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1157
1158 setOperationAction(ISD::FABS, MVT::f32, Legal);
1159 setOperationAction(ISD::FABS, MVT::f64, Expand);
1160
1161 setOperationAction(ISD::FNEG, MVT::f32, Legal);
1162 setOperationAction(ISD::FNEG, MVT::f64, Expand);
1163 } else {
1164
1165 // Expand fp<->uint.
1166 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);
1167 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
1168
1169 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
1170 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
1171
1172 setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf");
1173 setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf");
1174
1175 setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf");
1176 setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf");
1177
1178 setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf");
1179 setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf");
1180
1181 setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf");
1182 setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf");
1183
1184 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi");
1185 setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi");
1186
1187 setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi");
1188 setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi");
1189
1190 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
1191 setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");
1192
1193 setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
1194 setOperationAction(ISD::FADD, MVT::f64, Expand);
1195
1196 setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
1197 setOperationAction(ISD::FADD, MVT::f32, Expand);
1198
1199 setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
1200 setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand);
1201
1202 setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2");
1203 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
1204
1205 setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2");
1206 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
1207
1208 setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2");
1209 setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
1210
1211 setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2");
1212 setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
1213
1214 setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2");
1215 setCondCodeAction(ISD::SETOGT, MVT::f32, Expand);
1216
1217 setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
1218 setCondCodeAction(ISD::SETOGT, MVT::f64, Expand);
1219
1220 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi");
1221 setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand);
1222
1223 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi");
1224 setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand);
1225
1226 setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2");
1227 setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
1228
1229 setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2");
1230 setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
1231
1232 setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2");
1233 setCondCodeAction(ISD::SETOLT, MVT::f64, Expand);
1234
1235 setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
1236 setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
1237
1238 setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
1239 setOperationAction(ISD::FMUL, MVT::f64, Expand);
1240
1241 setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
1242 setOperationAction(ISD::FMUL, MVT::f32, Expand);
1243
1244 setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2");
1245 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
1246
1247 setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2");
1248
1249 setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
1250 setOperationAction(ISD::FSUB, MVT::f64, Expand);
1251
1252 setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
1253 setOperationAction(ISD::FSUB, MVT::f32, Expand);
1254
1255 setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2");
1256 setOperationAction(ISD::FP_ROUND, MVT::f64, Expand);
1257
1258 setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2");
1259 setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
1260
1261 setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2");
1262 setCondCodeAction(ISD::SETO, MVT::f64, Expand);
1263
1264 setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2");
1265 setCondCodeAction(ISD::SETO, MVT::f32, Expand);
1266
1267 setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2");
1268 setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
1269
1270 setOperationAction(ISD::FABS, MVT::f32, Expand);
1271 setOperationAction(ISD::FABS, MVT::f64, Expand);
1272 setOperationAction(ISD::FNEG, MVT::f32, Expand);
1273 setOperationAction(ISD::FNEG, MVT::f64, Expand);
1274 }
1275
1276 setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
1277 setOperationAction(ISD::SREM, MVT::i32, Expand);
1278
1279 setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
1280 setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
1281 setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
1282 setIndexedLoadAction(ISD::POST_INC, MVT::i64, Legal);
1283
1284 setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
1285 setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
1286 setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
1287 setIndexedStoreAction(ISD::POST_INC, MVT::i64, Legal);
1288
1289 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
1290
1291 // Turn FP extload into load/fextend.
1292 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
1293 // Hexagon has an i1 sign-extending load.
1294 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Expand);
1295 // Turn FP truncstore into trunc + store.
1296 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1297
1298 // Custom legalize GlobalAddress nodes into CONST32.
1299 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
1300 setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
1301 // Truncate action?
1302 setOperationAction(ISD::TRUNCATE, MVT::i64, Expand);
1303
1304 // Hexagon doesn't have sext_inreg, replace them with shl/sra.
1305 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
1306
1307 // Hexagon has no REM or DIVREM operations.
1308 setOperationAction(ISD::UREM, MVT::i32, Expand);
1309 setOperationAction(ISD::SREM, MVT::i32, Expand);
1310 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1311 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1312 setOperationAction(ISD::SREM, MVT::i64, Expand);
1313 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
1314 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
1315
1316 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
1317
1318 // Lower SELECT_CC to SETCC and SELECT.
1319 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1320 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
1321
1322 if (QRI->Subtarget.hasV5TOps()) {
1323
1324 // We need to make the operation type of the SELECT node Custom so that
1325 // we do not go into the infinite loop of
1326 // select -> setcc -> select_cc -> select.
1327 setOperationAction(ISD::SELECT, MVT::f32, Custom);
1328 setOperationAction(ISD::SELECT, MVT::f64, Custom);
1329
1330 setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
1331 setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
1332 setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
1333
1334 } else {
1335
1336 // Hexagon has no select or setcc: expand to SELECT_CC.
1337 setOperationAction(ISD::SELECT, MVT::f32, Expand);
1338 setOperationAction(ISD::SELECT, MVT::f64, Expand);
1339
1340 // This is a workaround documented in DAGCombiner.cpp:2892: we don't
1341 // support SELECT_CC on every type.
1342 setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
1343
1344 }
1345
1346 setOperationAction(ISD::BR_CC, MVT::Other, Expand);
1347 setOperationAction(ISD::BRIND, MVT::Other, Expand);
1348 if (EmitJumpTables) {
1349 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
1350 } else {
1351 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1352 }
1353
1354 setOperationAction(ISD::BR_CC, MVT::i32, Expand);
1355
1356 setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
1357 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
1358
1359 setOperationAction(ISD::FSIN , MVT::f64, Expand);
1360 setOperationAction(ISD::FCOS , MVT::f64, Expand);
1361 setOperationAction(ISD::FREM , MVT::f64, Expand);
1362 setOperationAction(ISD::FSIN , MVT::f32, Expand);
1363 setOperationAction(ISD::FCOS , MVT::f32, Expand);
1364 setOperationAction(ISD::FREM , MVT::f32, Expand);
1365 setOperationAction(ISD::CTPOP, MVT::i32, Expand);
1366 setOperationAction(ISD::CTTZ , MVT::i32, Expand);
1367 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
1368 setOperationAction(ISD::CTLZ , MVT::i32, Expand);
1369 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
1370 setOperationAction(ISD::ROTL , MVT::i32, Expand);
1371 setOperationAction(ISD::ROTR , MVT::i32, Expand);
1372 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
1373 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
1374 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
1375 setOperationAction(ISD::FPOW , MVT::f64, Expand);
1376 setOperationAction(ISD::FPOW , MVT::f32, Expand);
1377
1378 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
1379 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
1380 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
1381
1382 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
1383 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
1384
1385 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
1386 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
1387
1388 setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
1389 setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
1390 setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
1391 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
1392
1393 setOperationAction(ISD::EH_RETURN, MVT::Other, Expand);
1394
1395 if (TM.getSubtargetImpl()->isSubtargetV2()) {
1396 setExceptionPointerRegister(Hexagon::R20);
1397 setExceptionSelectorRegister(Hexagon::R21);
1398 } else {
1399 setExceptionPointerRegister(Hexagon::R0);
1400 setExceptionSelectorRegister(Hexagon::R1);
1401 }
1402
1403 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1404 setOperationAction(ISD::VASTART , MVT::Other, Custom);
1405
1406 // Use the default implementation.
1407 setOperationAction(ISD::VAARG , MVT::Other, Expand);
1408 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
1409 setOperationAction(ISD::VAEND , MVT::Other, Expand);
1410 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
1411 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
1412
1413
1414 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
1415 setOperationAction(ISD::INLINEASM , MVT::Other, Custom);
1416
1417 setMinFunctionAlignment(2);
1418
1419 // Needed for DYNAMIC_STACKALLOC expansion.
1420 unsigned StackRegister = TM.getRegisterInfo()->getStackRegister();
1421 setStackPointerRegisterToSaveRestore(StackRegister);
1422 setSchedulingPreference(Sched::VLIW);
1423 }
1424
1425
1426 const char*
1427 HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
1428 switch (Opcode) {
1429 default: return 0;
1430 case HexagonISD::CONST32: return "HexagonISD::CONST32";
1431 case HexagonISD::ADJDYNALLOC: return "HexagonISD::ADJDYNALLOC";
1432 case HexagonISD::CMPICC: return "HexagonISD::CMPICC";
1433 case HexagonISD::CMPFCC: return "HexagonISD::CMPFCC";
1434 case HexagonISD::BRICC: return "HexagonISD::BRICC";
1435 case HexagonISD::BRFCC: return "HexagonISD::BRFCC";
1436 case HexagonISD::SELECT_ICC: return "HexagonISD::SELECT_ICC";
1437 case HexagonISD::SELECT_FCC: return "HexagonISD::SELECT_FCC";
1438 case HexagonISD::Hi: return "HexagonISD::Hi";
1439 case HexagonISD::Lo: return "HexagonISD::Lo";
1440 case HexagonISD::FTOI: return "HexagonISD::FTOI";
1441 case HexagonISD::ITOF: return "HexagonISD::ITOF";
1442 case HexagonISD::CALL: return "HexagonISD::CALL";
1443 case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
1444 case HexagonISD::BR_JT: return "HexagonISD::BR_JT";
1445 case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
1446 }
1447 }
1448
1449 bool
1450 HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
1451 EVT MTy1 = EVT::getEVT(Ty1);
1452 EVT MTy2 = EVT::getEVT(Ty2);
1453 if (!MTy1.isSimple() || !MTy2.isSimple()) {
1454 return false;
1455 }
1456 return ((MTy1.getSimpleVT() == MVT::i64) && (MTy2.getSimpleVT() == MVT::i32));
1457 }
1458
isTruncateFree(EVT VT1,EVT VT2) const1459 bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
1460 if (!VT1.isSimple() || !VT2.isSimple()) {
1461 return false;
1462 }
1463 return ((VT1.getSimpleVT() == MVT::i64) && (VT2.getSimpleVT() == MVT::i32));
1464 }
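// Illustrative note (assumption based on the checks above): only i64 -> i32
// truncation is reported as free here, the idea being that the low 32-bit half
// of a 64-bit register pair can be used directly. For example,
//   %lo = trunc i64 %x to i32
// would be expected to select to a subregister use rather than a separate
// instruction.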

SDValue
HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
    default: llvm_unreachable("Should not custom lower this!");
    case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
    // Frame & Return address. Currently unimplemented.
    case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
    case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
    case ISD::GlobalTLSAddress:
      llvm_unreachable("TLS not implemented for Hexagon.");
    case ISD::MEMBARRIER:         return LowerMEMBARRIER(Op, DAG);
    case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, DAG);
    case ISD::GlobalAddress:      return LowerGLOBALADDRESS(Op, DAG);
    case ISD::VASTART:            return LowerVASTART(Op, DAG);
    case ISD::BR_JT:              return LowerBR_JT(Op, DAG);
    case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
    case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
    case ISD::SELECT:             return Op;
    case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
    case ISD::INLINEASM:          return LowerINLINEASM(Op, DAG);
  }
}

//===----------------------------------------------------------------------===//
// Hexagon Scheduler Hooks
//===----------------------------------------------------------------------===//
MachineBasicBlock *
HexagonTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                   MachineBasicBlock *BB)
const {
  switch (MI->getOpcode()) {
    case Hexagon::ADJDYNALLOC: {
      MachineFunction *MF = BB->getParent();
      HexagonMachineFunctionInfo *FuncInfo =
        MF->getInfo<HexagonMachineFunctionInfo>();
      FuncInfo->addAllocaAdjustInst(MI);
      return BB;
    }
    default: llvm_unreachable("Unexpected instr type to insert");
  } // switch
}
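// Note (descriptive summary of the code above): ADJDYNALLOC is not expanded
// here; the instruction is only recorded in HexagonMachineFunctionInfo,
// presumably so that frame lowering can rewrite the stack adjustment once the
// final frame layout is known.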

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass*>
HexagonTargetLowering::getRegForInlineAsmConstraint(const
                                                    std::string &Constraint,
                                                    EVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      case 'r':   // R0-R31
        switch (VT.getSimpleVT().SimpleTy) {
          default:
            llvm_unreachable("getRegForInlineAsmConstraint Unhandled data type");
          case MVT::i32:
          case MVT::i16:
          case MVT::i8:
          case MVT::f32:
            return std::make_pair(0U, &Hexagon::IntRegsRegClass);
          case MVT::i64:
          case MVT::f64:
            return std::make_pair(0U, &Hexagon::DoubleRegsRegClass);
        }
      default:
        llvm_unreachable("Unknown asm register class");
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
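// Illustrative example (hypothetical user code, not part of this file): for
//   asm("%0 = add(%1, %2)" : "=r"(res) : "r"(a), "r"(b));
// the 'r' constraint handled above resolves 32-bit (and smaller) operands to
// IntRegs and 64-bit operands to DoubleRegs.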

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  const HexagonRegisterInfo *QRI = TM.getRegisterInfo();
  return QRI->Subtarget.hasV5TOps();
}
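// For example, on a pre-V5 subtarget a 'float 1.0' operand is reported as not
// natively selectable here, so the legalizer is expected to load it from the
// constant pool as described in the comment above.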

/// isLegalAddressingMode - Return true if the addressing mode represented by
/// AM is legal for this target, for a load/store of the specified type.
bool HexagonTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                  Type *Ty) const {
  // The base offset must fit in the signed immediate range checked below
  // (roughly a signed 14-bit field).
  if (AM.BaseOffs <= -(1LL << 13) || AM.BaseOffs >= (1LL << 13)-1) {
    return false;
  }

  // No global is ever allowed as a base.
  if (AM.BaseGV) {
    return false;
  }

  int Scale = AM.Scale;
  if (Scale < 0) Scale = -Scale;
  switch (Scale) {
  case 0:  // No scale reg, "r+i", "r", or just "i".
    break;
  default: // No scaled addressing mode.
    return false;
  }
  return true;
}
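// Illustrative examples of the rules above (assumed AddrMode encodings):
//   reg + 8         -> legal   (Scale == 0, offset within the checked range)
//   reg + 100000    -> illegal (offset outside the checked range)
//   reg + 4*reg     -> illegal (scaled index register not accepted here)
//   @global + reg   -> illegal (a global base is always rejected)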

/// isLegalICmpImmediate - Return true if the specified immediate is a legal
/// icmp immediate, that is, the target has icmp instructions which can compare
/// a register against the immediate without having to materialize the
/// immediate into a register.
bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return Imm >= -512 && Imm <= 511;
}
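// The accepted range [-512, 511] is exactly the set of values representable in
// a signed 10-bit field. For example, 'icmp eq i32 %x, 100' can keep 100 as an
// immediate, whereas a compare against 4096 would need the constant
// materialized into a register first.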

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
                                 SDValue Callee,
                                 CallingConv::ID CalleeCC,
                                 bool isVarArg,
                                 bool isCalleeStructRet,
                                 bool isCallerStructRet,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SmallVectorImpl<ISD::InputArg> &Ins,
                                 SelectionDAG &DAG) const {
  const Function *CallerF = DAG.getMachineFunction().getFunction();
  CallingConv::ID CallerCC = CallerF->getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // ***************************************************************************
  //  Look for obvious safe cases to perform tail call optimization that do not
  //  require ABI changes.
  // ***************************************************************************

  // If this is a tail call via a function pointer, then don't do it!
  if (!(dyn_cast<GlobalAddressSDNode>(Callee))
      && !(dyn_cast<ExternalSymbolSDNode>(Callee))) {
    return false;
  }

  // Do not optimize if the calling conventions do not match.
  if (!CCMatch)
    return false;

  // Do not tail call optimize vararg calls.
  if (isVarArg)
    return false;

  // Also avoid tail call optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // In addition to the cases above, tail call optimization is also disabled
  // when the calling convention requires that at least one outgoing argument
  // be passed on the stack. That cannot be checked here because the argument
  // assignment is not known at this point.
  return true;
}

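// Illustrative summary (restating the checks above, not additional policy):
// a direct, non-vararg call with a matching calling convention and no
// struct-return semantics, e.g. a plain 'return foo(x, y);' in the caller,
// can be considered for tail call optimization, while indirect calls through
// function pointers, vararg calls, and sret calls are rejected.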